"""
Step 6: Sweep runner — iterate all 8640 param combos, save results to JSON.
Usage:
python3 backtests/sweep_runner.py --days 7
python3 backtests/sweep_runner.py --days 14
Output: backtests/sweep_results_7d.json (or 14d)
"""
import os
import sys
import json
import time
import pickle
import argparse
from itertools import product
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from backtests.backtest_core import run_backtest
# Parameter grid from Rick's plan
Z_ENTRY_VALUES = [1.9, 2.0, 2.2, 2.5, 3.0]
Z_MAX_VALUES = [3.0, 3.5, 4.0, 5.0]
NATR_MIN_VALUES = [0.5, 0.75, 1.0]
NATR_MAX_VALUES = [2.0, 2.5, 3.0, 3.5]
CHOP_MIN_VALUES = [45, 50, 55]

# R:R grid (TP/SL pairs): for every reward multiple (2x..5x of risk) and every
# stop size, the take-profit is multiple × stop.  Expands to 12 pairs, grouped
# by multiple then stop size — same dicts, same order as writing them out by hand.
RR_PAIRS = [
    {"rr": f"{mult}:1", "tp_pct": mult * sl, "sl_pct": sl}
    for mult in (2, 3, 4, 5)
    for sl in (0.5, 1.0, 1.5)
]
def _load_cache(days):
    """Load the pickled market-data cache for *days* of history, or exit(1) with help."""
    cache_path = os.path.join(os.path.dirname(__file__), f"data_cache_{days}d.pkl")
    if not os.path.exists(cache_path):
        print(f"❌ Cache not found: {cache_path}")
        print(f" Run: python3 backtests/download_data.py --days {days}")
        sys.exit(1)
    print(f"Loading {cache_path}...")
    with open(cache_path, "rb") as f:
        # NOTE: pickle is only safe because the cache is produced locally by
        # download_data.py — never point this at untrusted files.
        return pickle.load(f)


def _run_sweep(symbols, filter_combos, total, start):
    """Backtest every (filter combo × R:R pair); return results with >= 10 trades.

    Progress is reported roughly every 500 completed combos.  A threshold
    (`next_report`) is used instead of `done % 500 == 0` because `done` jumps
    by len(RR_PAIRS) when an invalid combo is skipped, which can step over an
    exact multiple of 500 and silently drop progress lines.
    """
    results = []
    done = 0
    next_report = 500
    for z_entry, z_max, natr_min, natr_max, chop_min in filter_combos:
        # Skip invalid combos (z_entry >= z_max makes no sense), but still
        # count them toward `done` so the progress/ETA math stays honest.
        if z_entry >= z_max:
            done += len(RR_PAIRS)
        else:
            for rr in RR_PAIRS:
                params = {
                    "z_entry": z_entry,
                    "z_max": z_max,
                    "natr_min": natr_min,
                    "natr_max": natr_max,
                    "chop_min": chop_min,
                    "tp_pct": rr["tp_pct"],
                    "sl_pct": rr["sl_pct"],
                }
                r = run_backtest(symbols, params)
                done += 1
                # Only save combos with at least 10 trades (statistical minimum).
                if r["trades"] >= 10:
                    results.append({
                        **params,
                        "rr": rr["rr"],
                        **r,
                    })
        if done >= next_report:
            next_report = (done // 500 + 1) * 500
            elapsed = time.time() - start
            rate = done / elapsed
            eta = (total - done) / rate if rate > 0 else 0
            print(f" [{done}/{total}] {rate:.0f} combos/sec, ETA {eta:.0f}s | saved {len(results)} results")
    return results


def _save_results(days, date, total, elapsed, results):
    """Write the sweep summary + sorted results to backtests/sweep_results_{days}d.json."""
    out_path = os.path.join(os.path.dirname(__file__), f"sweep_results_{days}d.json")
    with open(out_path, "w") as f:
        json.dump({
            "days": days,
            "date": date,
            "total_combos": total,
            "valid_results": len(results),
            "elapsed_sec": round(elapsed, 1),
            "results": results,
        }, f, indent=2)
    print(f"\n✅ Done in {elapsed:.1f}s — {len(results)} valid results saved to {out_path}")


def _print_top5(results):
    """Print a fixed-width preview table of the five best results by PnL."""
    print(f"\nTop 5 by PnL:")
    print(f"{'R:R':<5} {'TP/SL':<8} {'Z':>7} {'Zmax':>5} {'NATR':>9} {'CHOP':>4} {'Tr':>4} {'WR%':>5} {'PnL':>7} {'PF':>5}")
    print("-" * 70)
    for r in results[:5]:
        print(f"{r['rr']:<5} {r['tp_pct']}/{r['sl_pct']:<5} "
              f"{r['z_entry']:>5.1f} {r['z_max']:>5.1f} "
              f"{r['natr_min']:.2f}-{r['natr_max']:.1f} "
              f"{r['chop_min']:>4} {r['trades']:>4} {r['wr']:>4.1f}% "
              f"{r['pnl']:>+6.1f} {r['pf']:>5.2f}")


def main():
    """CLI entry point: sweep all filter/R:R combos over the cached data.

    Reads backtests/data_cache_{days}d.pkl, runs run_backtest() for every
    parameter combination, and writes results (sorted by PnL, descending)
    to backtests/sweep_results_{days}d.json.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--days", type=int, default=7)
    args = parser.parse_args()

    cache = _load_cache(args.days)
    symbols = cache["symbols"]
    print(f" {len(symbols)} symbols, date: {cache['date']}")

    # Build all combos (cartesian product of the five filter grids).
    filter_combos = list(product(
        Z_ENTRY_VALUES, Z_MAX_VALUES, NATR_MIN_VALUES, NATR_MAX_VALUES, CHOP_MIN_VALUES
    ))
    total = len(filter_combos) * len(RR_PAIRS)
    print(f"Sweep: {len(filter_combos)} filter combos × {len(RR_PAIRS)} R:R pairs = {total} total")

    start = time.time()
    results = _run_sweep(symbols, filter_combos, total, start)
    elapsed = time.time() - start

    # Sort by PnL descending so the best combos lead the JSON and the preview.
    results.sort(key=lambda x: x["pnl"], reverse=True)
    _save_results(args.days, cache["date"], total, elapsed, results)
    _print_top5(results)


if __name__ == "__main__":
    main()