scripts: regenerate_md + stats + tests (116-144 passing across modules)
This commit is contained in:
@@ -1,26 +1,26 @@
|
||||
"""Tests for scripts/append_row.py."""
|
||||
"""Tests for scripts/append_row.py — append_extraction pipeline."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import csv
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
from pydantic import ValidationError
|
||||
import yaml
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
|
||||
|
||||
from scripts.append_row import ( # noqa: E402
|
||||
CSV_COLUMNS,
|
||||
VALID_SOURCES,
|
||||
append_row,
|
||||
append_row_from_json,
|
||||
build_row,
|
||||
read_rows,
|
||||
ZI_RO_MAP,
|
||||
append_extraction,
|
||||
csv_columns,
|
||||
)
|
||||
from scripts.vision_schema import parse_extraction_dict # noqa: E402
|
||||
|
||||
|
||||
REPO_ROOT = Path(__file__).resolve().parent.parent
|
||||
@@ -29,12 +29,12 @@ META_PATH = REPO_ROOT / "data" / "_meta.yaml"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# fixtures / payload helpers
|
||||
# helpers / fixtures
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _buy_payload(**overrides) -> dict:
|
||||
# 2026-05-13 14:23 UTC == 17:23 RO (EEST, Wed) → Set A2.
|
||||
# 2026-05-13 14:23 UTC == 17:23 RO (EEST, Wed) → set A2, zi=Mi.
|
||||
base = {
|
||||
"screenshot_file": "dia-2026-05-13-1.png",
|
||||
"data": "2026-05-13",
|
||||
@@ -61,198 +61,227 @@ def _buy_payload(**overrides) -> dict:
|
||||
return base
|
||||
|
||||
|
||||
def _write_payload(tmp_path: Path, name: str, **overrides) -> Path:
|
||||
p = tmp_path / name
|
||||
p.write_text(json.dumps(_buy_payload(**overrides)), encoding="utf-8")
|
||||
return p
|
||||
|
||||
|
||||
def _read_rows(csv_path: Path) -> list[dict[str, str]]:
|
||||
with csv_path.open("r", encoding="utf-8", newline="") as fh:
|
||||
return list(csv.DictReader(fh))
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def csv_path(tmp_path: Path) -> Path:
|
||||
return tmp_path / "trades.csv"
|
||||
return tmp_path / "jurnal.csv"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# build_row — computed fields
|
||||
# schema / column layout
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestBuildRow:
|
||||
def setup_method(self) -> None:
|
||||
import yaml
|
||||
with META_PATH.open("r", encoding="utf-8") as fh:
|
||||
self.meta = yaml.safe_load(fh)
|
||||
from scripts.calendar_parse import load_calendar
|
||||
self.calendar = load_calendar(CALENDAR_PATH)
|
||||
|
||||
def test_happy_path_computed_fields(self) -> None:
|
||||
extr = parse_extraction_dict(_buy_payload())
|
||||
row = build_row(extr, "manual", self.meta, self.calendar)
|
||||
# 14:23 UTC on 2026-05-13 = 17:23 RO (EEST), Wed → A2
|
||||
assert row["ora_ro"] == "17:23"
|
||||
assert row["zi"] == "Wed"
|
||||
assert row["set"] == "A2"
|
||||
# pl_marius for TP0->TP1 with be_moved=True is +0.50R
|
||||
assert float(row["pl_marius"]) == pytest.approx(0.50)
|
||||
# pl_theoretical for max_reached=TP1 is 0.333
|
||||
assert float(row["pl_theoretical"]) == pytest.approx(0.333)
|
||||
# version stamps copied from meta
|
||||
assert row["indicator_version"] == str(self.meta["indicator_version"])
|
||||
assert row["pl_overlay_version"] == str(self.meta["pl_overlay_version"])
|
||||
assert row["csv_schema_version"] == str(self.meta["csv_schema_version"])
|
||||
|
||||
def test_pending_overlay_is_blank(self) -> None:
|
||||
extr = parse_extraction_dict(
|
||||
_buy_payload(outcome_path="pending", max_reached="TP0")
|
||||
)
|
||||
row = build_row(extr, "vision", self.meta, self.calendar)
|
||||
# pl_marius returns None for pending → empty string in CSV
|
||||
assert row["pl_marius"] == ""
|
||||
# pl_theoretical always concrete
|
||||
assert row["pl_theoretical"] != ""
|
||||
|
||||
def test_invalid_source_rejected(self) -> None:
|
||||
extr = parse_extraction_dict(_buy_payload())
|
||||
with pytest.raises(ValueError):
|
||||
build_row(extr, "auto_magic", self.meta, self.calendar)
|
||||
|
||||
def test_all_valid_sources_accepted(self) -> None:
|
||||
extr = parse_extraction_dict(_buy_payload())
|
||||
for s in VALID_SOURCES:
|
||||
row = build_row(extr, s, self.meta, self.calendar)
|
||||
assert row["source"] == s
|
||||
def test_csv_columns_canonical_29() -> None:
|
||||
cols = csv_columns()
|
||||
assert len(cols) == 29
|
||||
assert cols[0] == "id"
|
||||
assert cols[-1] == "note"
|
||||
assert cols == list(CSV_COLUMNS)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# append_row — happy path, dedup, atomic writes
|
||||
# core tests as specified in task #9
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestAppendRow:
|
||||
def test_happy_path_writes_header_and_row(self, csv_path: Path) -> None:
|
||||
extr = parse_extraction_dict(_buy_payload())
|
||||
row = append_row(extr, "manual", csv_path, META_PATH, CALENDAR_PATH)
|
||||
assert csv_path.exists()
|
||||
def test_happy_path(tmp_path: Path, csv_path: Path) -> None:
|
||||
j = _write_payload(tmp_path, "t.json")
|
||||
result = append_extraction(
|
||||
j, "vision", csv_path, META_PATH, CALENDAR_PATH
|
||||
)
|
||||
assert result["status"] == "ok", result
|
||||
assert result["reason"] == ""
|
||||
assert result["id"] == 1
|
||||
|
||||
with csv_path.open("r", encoding="utf-8", newline="") as fh:
|
||||
reader = csv.DictReader(fh)
|
||||
assert reader.fieldnames == list(CSV_COLUMNS)
|
||||
rows = list(reader)
|
||||
assert len(rows) == 1
|
||||
assert rows[0]["screenshot_file"] == row["screenshot_file"]
|
||||
assert rows[0]["set"] == "A2"
|
||||
assert rows[0]["source"] == "manual"
|
||||
rows = _read_rows(csv_path)
|
||||
assert len(rows) == 1
|
||||
r = rows[0]
|
||||
assert r["id"] == "1"
|
||||
assert r["screenshot_file"] == "dia-2026-05-13-1.png"
|
||||
assert r["source"] == "vision"
|
||||
assert r["data"] == "2026-05-13"
|
||||
assert r["zi"] == "Mi"
|
||||
assert r["ora_ro"] == "17:23"
|
||||
assert r["ora_utc"] == "14:23"
|
||||
assert r["set"] == "A2"
|
||||
assert r["instrument"] == "DIA"
|
||||
assert r["directie"] == "Buy"
|
||||
assert r["be_moved"] == "True"
|
||||
|
||||
def test_two_distinct_rows(self, csv_path: Path) -> None:
|
||||
e1 = parse_extraction_dict(_buy_payload(screenshot_file="a.png"))
|
||||
e2 = parse_extraction_dict(_buy_payload(screenshot_file="b.png"))
|
||||
append_row(e1, "manual", csv_path, META_PATH, CALENDAR_PATH)
|
||||
append_row(e2, "manual", csv_path, META_PATH, CALENDAR_PATH)
|
||||
rows = read_rows(csv_path)
|
||||
assert len(rows) == 2
|
||||
assert {r["screenshot_file"] for r in rows} == {"a.png", "b.png"}
|
||||
|
||||
def test_dedup_raises(self, csv_path: Path) -> None:
|
||||
extr = parse_extraction_dict(_buy_payload())
|
||||
append_row(extr, "manual", csv_path, META_PATH, CALENDAR_PATH)
|
||||
with pytest.raises(ValueError, match="duplicate"):
|
||||
append_row(extr, "manual", csv_path, META_PATH, CALENDAR_PATH)
|
||||
# CSV still contains exactly the one row
|
||||
assert len(read_rows(csv_path)) == 1
|
||||
def test_pl_calc_overlay(tmp_path: Path, csv_path: Path) -> None:
|
||||
"""outcome_path=TP0->TP1, max_reached=TP1 → pl_marius=0.5, pl_theoretical=0.333."""
|
||||
j = _write_payload(tmp_path, "t.json")
|
||||
result = append_extraction(j, "vision", csv_path, META_PATH, CALENDAR_PATH)
|
||||
assert result["status"] == "ok"
|
||||
r = _read_rows(csv_path)[0]
|
||||
assert float(r["pl_marius"]) == pytest.approx(0.50)
|
||||
assert float(r["pl_theoretical"]) == pytest.approx(0.333)
|
||||
|
||||
def test_dedup_skip(self, csv_path: Path) -> None:
|
||||
extr = parse_extraction_dict(_buy_payload())
|
||||
first = append_row(extr, "manual", csv_path, META_PATH, CALENDAR_PATH)
|
||||
# Mutate the extraction; the existing row should be returned untouched.
|
||||
extr2 = parse_extraction_dict(_buy_payload(note="changed"))
|
||||
existing = append_row(
|
||||
extr2, "manual", csv_path, META_PATH, CALENDAR_PATH, on_duplicate="skip"
|
||||
)
|
||||
assert existing["note"] == first["note"] == ""
|
||||
assert len(read_rows(csv_path)) == 1
|
||||
|
||||
def test_calibration_coexistence(self, csv_path: Path) -> None:
|
||||
"""manual_calibration + vision_calibration on the SAME screenshot must coexist."""
|
||||
extr = parse_extraction_dict(_buy_payload())
|
||||
append_row(extr, "manual_calibration", csv_path, META_PATH, CALENDAR_PATH)
|
||||
# Vision leg may differ slightly — change entry by 0.1, still valid.
|
||||
extr_vision = parse_extraction_dict(
|
||||
_buy_payload(entry=400.1, confidence="medium")
|
||||
)
|
||||
append_row(
|
||||
extr_vision, "vision_calibration", csv_path, META_PATH, CALENDAR_PATH
|
||||
)
|
||||
def test_dedup_same_source(tmp_path: Path, csv_path: Path) -> None:
|
||||
j = _write_payload(tmp_path, "t.json")
|
||||
r1 = append_extraction(j, "vision", csv_path, META_PATH, CALENDAR_PATH)
|
||||
r2 = append_extraction(j, "vision", csv_path, META_PATH, CALENDAR_PATH)
|
||||
assert r1["status"] == "ok"
|
||||
assert r2["status"] == "rejected"
|
||||
assert "duplicate" in r2["reason"].lower()
|
||||
assert r2["id"] is None
|
||||
assert r2["row"] is None
|
||||
assert len(_read_rows(csv_path)) == 1
|
||||
|
||||
rows = read_rows(csv_path)
|
||||
assert len(rows) == 2
|
||||
sources = {r["source"] for r in rows}
|
||||
assert sources == {"manual_calibration", "vision_calibration"}
|
||||
# Same screenshot, different source ⇒ no dedup collision.
|
||||
files = {r["screenshot_file"] for r in rows}
|
||||
assert files == {extr.screenshot_file}
|
||||
|
||||
def test_calibration_duplicate_same_source_rejected(
|
||||
self, csv_path: Path
|
||||
) -> None:
|
||||
extr = parse_extraction_dict(_buy_payload())
|
||||
append_row(extr, "manual_calibration", csv_path, META_PATH, CALENDAR_PATH)
|
||||
with pytest.raises(ValueError, match="duplicate"):
|
||||
append_row(
|
||||
extr, "manual_calibration", csv_path, META_PATH, CALENDAR_PATH
|
||||
)
|
||||
def test_dedup_different_source_ok(tmp_path: Path, csv_path: Path) -> None:
|
||||
"""Same screenshot_file + different source ⇒ both rows accepted."""
|
||||
j = _write_payload(tmp_path, "t.json")
|
||||
r1 = append_extraction(
|
||||
j, "manual_calibration", csv_path, META_PATH, CALENDAR_PATH
|
||||
)
|
||||
r2 = append_extraction(
|
||||
j, "vision_calibration", csv_path, META_PATH, CALENDAR_PATH
|
||||
)
|
||||
assert r1["status"] == "ok"
|
||||
assert r2["status"] == "ok"
|
||||
rows = _read_rows(csv_path)
|
||||
assert len(rows) == 2
|
||||
assert {r["source"] for r in rows} == {"manual_calibration", "vision_calibration"}
|
||||
# Distinct sequential ids.
|
||||
assert {r["id"] for r in rows} == {"1", "2"}
|
||||
|
||||
|
||||
def test_invalid_pydantic_rejected(tmp_path: Path, csv_path: Path) -> None:
|
||||
"""entry == sl is rejected by pydantic; no CSV is written."""
|
||||
j = _write_payload(tmp_path, "bad.json", entry=399.0, sl=399.0)
|
||||
result = append_extraction(j, "vision", csv_path, META_PATH, CALENDAR_PATH)
|
||||
assert result["status"] == "rejected"
|
||||
assert "validation" in result["reason"].lower()
|
||||
assert not csv_path.exists()
|
||||
|
||||
|
||||
def test_missing_json_file(tmp_path: Path, csv_path: Path) -> None:
|
||||
missing = tmp_path / "ghost.json"
|
||||
result = append_extraction(
|
||||
missing, "vision", csv_path, META_PATH, CALENDAR_PATH
|
||||
)
|
||||
assert result["status"] == "rejected"
|
||||
assert "not found" in result["reason"].lower()
|
||||
assert not csv_path.exists()
|
||||
|
||||
|
||||
def test_id_increments(tmp_path: Path, csv_path: Path) -> None:
|
||||
paths = [
|
||||
_write_payload(tmp_path, "a.json", screenshot_file="a.png"),
|
||||
_write_payload(tmp_path, "b.json", screenshot_file="b.png"),
|
||||
_write_payload(tmp_path, "c.json", screenshot_file="c.png"),
|
||||
]
|
||||
ids = []
|
||||
for p in paths:
|
||||
r = append_extraction(p, "vision", csv_path, META_PATH, CALENDAR_PATH)
|
||||
assert r["status"] == "ok"
|
||||
ids.append(r["id"])
|
||||
assert ids == [1, 2, 3]
|
||||
csv_ids = [int(r["id"]) for r in _read_rows(csv_path)]
|
||||
assert csv_ids == [1, 2, 3]
|
||||
|
||||
|
||||
def test_set_a2(tmp_path: Path, csv_path: Path) -> None:
|
||||
"""Wed 2026-05-13 14:30 UTC → 17:30 RO → A2 sweet spot."""
|
||||
j = _write_payload(tmp_path, "t.json", ora_utc="14:30")
|
||||
r = append_extraction(j, "vision", csv_path, META_PATH, CALENDAR_PATH)
|
||||
assert r["status"] == "ok"
|
||||
row = _read_rows(csv_path)[0]
|
||||
assert row["ora_ro"] == "17:30"
|
||||
assert row["zi"] == "Mi"
|
||||
assert row["set"] == "A2"
|
||||
|
||||
|
||||
def test_set_c_fomc(tmp_path: Path, csv_path: Path) -> None:
|
||||
"""2026-04-29 18:35 UTC == 21:35 RO (FOMC Powell Press window) → Set C."""
|
||||
j = _write_payload(
|
||||
tmp_path,
|
||||
"t.json",
|
||||
data="2026-04-29",
|
||||
ora_utc="18:35",
|
||||
screenshot_file="fomc-apr.png",
|
||||
)
|
||||
r = append_extraction(j, "vision", csv_path, META_PATH, CALENDAR_PATH)
|
||||
assert r["status"] == "ok"
|
||||
row = _read_rows(csv_path)[0]
|
||||
assert row["ora_ro"] == "21:35"
|
||||
assert row["set"] == "C"
|
||||
|
||||
|
||||
def test_versions_stamped(tmp_path: Path, csv_path: Path) -> None:
|
||||
j = _write_payload(tmp_path, "t.json")
|
||||
append_extraction(j, "vision", csv_path, META_PATH, CALENDAR_PATH)
|
||||
row = _read_rows(csv_path)[0]
|
||||
meta = yaml.safe_load(META_PATH.read_text(encoding="utf-8"))
|
||||
assert row["indicator_version"] == str(meta["indicator_version"])
|
||||
assert row["pl_overlay_version"] == str(meta["pl_overlay_version"])
|
||||
assert row["csv_schema_version"] == str(meta["csv_schema_version"])
|
||||
|
||||
|
||||
def test_extracted_at_format(tmp_path: Path, csv_path: Path) -> None:
|
||||
j = _write_payload(tmp_path, "t.json")
|
||||
append_extraction(j, "vision", csv_path, META_PATH, CALENDAR_PATH)
|
||||
val = _read_rows(csv_path)[0]["extracted_at"]
|
||||
# ISO 8601 UTC with trailing 'Z': YYYY-MM-DDTHH:MM:SSZ
|
||||
assert re.match(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$", val), val
|
||||
# Round-trip through datetime.fromisoformat after dropping the Z.
|
||||
parsed = datetime.fromisoformat(val[:-1])
|
||||
assert parsed.year >= 2026
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Cross-field invalid input
|
||||
# additional safety nets
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestInvalidInput:
|
||||
def test_buy_with_inverted_tp_rejected_before_append(
|
||||
self, csv_path: Path
|
||||
) -> None:
|
||||
# tp1 < tp0 violates Buy ordering: caught at validation, not by append_row.
|
||||
with pytest.raises(ValidationError):
|
||||
parse_extraction_dict(
|
||||
_buy_payload(tp0=401.0, tp1=400.5, tp2=402.0)
|
||||
)
|
||||
assert not csv_path.exists() # nothing written
|
||||
|
||||
def test_outcome_path_sl_with_tp1_max_rejected(self, csv_path: Path) -> None:
|
||||
with pytest.raises(ValidationError):
|
||||
parse_extraction_dict(
|
||||
_buy_payload(outcome_path="SL", max_reached="TP1")
|
||||
)
|
||||
assert not csv_path.exists()
|
||||
|
||||
def test_append_row_from_json_invalid_payload(
|
||||
self, tmp_path: Path, csv_path: Path
|
||||
) -> None:
|
||||
bad = tmp_path / "bad.json"
|
||||
payload = _buy_payload(directie="Long") # invalid Literal
|
||||
bad.write_text(json.dumps(payload), encoding="utf-8")
|
||||
with pytest.raises(ValidationError):
|
||||
append_row_from_json(
|
||||
bad, "vision", csv_path, META_PATH, CALENDAR_PATH
|
||||
)
|
||||
assert not csv_path.exists()
|
||||
def test_invalid_source_rejected(tmp_path: Path, csv_path: Path) -> None:
|
||||
j = _write_payload(tmp_path, "t.json")
|
||||
r = append_extraction(j, "auto_magic", csv_path, META_PATH, CALENDAR_PATH)
|
||||
assert r["status"] == "rejected"
|
||||
assert "source" in r["reason"].lower()
|
||||
assert not csv_path.exists()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Atomic write: no temp file remains on disk
|
||||
# ---------------------------------------------------------------------------
|
||||
def test_all_valid_sources_accepted(tmp_path: Path, csv_path: Path) -> None:
|
||||
for i, src in enumerate(sorted(VALID_SOURCES)):
|
||||
j = _write_payload(tmp_path, f"t{i}.json", screenshot_file=f"s{i}.png")
|
||||
r = append_extraction(j, src, csv_path, META_PATH, CALENDAR_PATH)
|
||||
assert r["status"] == "ok", (src, r)
|
||||
rows = _read_rows(csv_path)
|
||||
assert {r["source"] for r in rows} == set(VALID_SOURCES)
|
||||
|
||||
|
||||
class TestAtomicWrite:
|
||||
def test_no_temp_file_left_behind(self, csv_path: Path) -> None:
|
||||
extr = parse_extraction_dict(_buy_payload())
|
||||
append_row(extr, "manual", csv_path, META_PATH, CALENDAR_PATH)
|
||||
leftovers = [
|
||||
p for p in csv_path.parent.iterdir() if p.name.endswith(".tmp")
|
||||
]
|
||||
assert leftovers == []
|
||||
def test_atomic_write_leaves_no_tmp(tmp_path: Path, csv_path: Path) -> None:
|
||||
j = _write_payload(tmp_path, "t.json")
|
||||
append_extraction(j, "vision", csv_path, META_PATH, CALENDAR_PATH)
|
||||
leftovers = [p for p in csv_path.parent.iterdir() if p.name.endswith(".tmp")]
|
||||
assert leftovers == []
|
||||
|
||||
def test_append_row_from_json_roundtrip(
|
||||
self, tmp_path: Path, csv_path: Path
|
||||
) -> None:
|
||||
good = tmp_path / "good.json"
|
||||
good.write_text(json.dumps(_buy_payload()), encoding="utf-8")
|
||||
row = append_row_from_json(
|
||||
good, "vision", csv_path, META_PATH, CALENDAR_PATH
|
||||
)
|
||||
assert row["source"] == "vision"
|
||||
assert read_rows(csv_path)[0]["screenshot_file"] == row["screenshot_file"]
|
||||
|
||||
def test_zi_ro_map_covers_all_weekdays() -> None:
|
||||
"""Internal sanity: the Romanian-day map covers all 7 short weekday names."""
|
||||
assert set(ZI_RO_MAP.keys()) == {"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"}
|
||||
assert set(ZI_RO_MAP.values()) == {"Lu", "Ma", "Mi", "Jo", "Vi", "Sa", "Du"}
|
||||
|
||||
|
||||
def test_malformed_json_rejected(tmp_path: Path, csv_path: Path) -> None:
|
||||
bad = tmp_path / "broken.json"
|
||||
bad.write_text("{not valid json", encoding="utf-8")
|
||||
r = append_extraction(bad, "vision", csv_path, META_PATH, CALENDAR_PATH)
|
||||
assert r["status"] == "rejected"
|
||||
assert "validation" in r["reason"].lower() or "json" in r["reason"].lower()
|
||||
assert not csv_path.exists()
|
||||
|
||||
208
tests/test_regenerate_md.py
Normal file
208
tests/test_regenerate_md.py
Normal file
@@ -0,0 +1,208 @@
|
||||
"""Tests for scripts/regenerate_md.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import csv
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
|
||||
|
||||
from scripts.append_row import csv_columns # noqa: E402
|
||||
from scripts.regenerate_md import MD_COLUMNS, regenerate_md # noqa: E402
|
||||
|
||||
|
||||
def _row(**overrides: str) -> dict[str, str]:
|
||||
base = {
|
||||
"id": "1",
|
||||
"screenshot_file": "2026-05-13_dia_5min.png",
|
||||
"source": "vision",
|
||||
"data": "2026-05-13",
|
||||
"zi": "Mi",
|
||||
"ora_ro": "17:23",
|
||||
"ora_utc": "14:23",
|
||||
"instrument": "DIA",
|
||||
"directie": "long",
|
||||
"tf_mare": "5min",
|
||||
"tf_mic": "1min",
|
||||
"calitate": "Clară",
|
||||
"entry": "497.42",
|
||||
"sl": "496.80",
|
||||
"tp0": "497.67",
|
||||
"tp1": "497.79",
|
||||
"tp2": "498.04",
|
||||
"risc_pct": "0.50",
|
||||
"outcome_path": "TP0→TP1",
|
||||
"max_reached": "TP1",
|
||||
"be_moved": "true",
|
||||
"pl_marius": "0.5000",
|
||||
"pl_theoretical": "0.3330",
|
||||
"set": "A2",
|
||||
"indicator_version": "1",
|
||||
"pl_overlay_version": "1",
|
||||
"csv_schema_version": "1",
|
||||
"extracted_at": "2026-05-13T14:30:00Z",
|
||||
"note": "",
|
||||
}
|
||||
base.update(overrides)
|
||||
return base
|
||||
|
||||
|
||||
def _write_csv(
|
||||
path: Path,
|
||||
rows: list[dict[str, str]],
|
||||
extra_columns: list[str] | None = None,
|
||||
) -> None:
|
||||
fieldnames = csv_columns()
|
||||
if extra_columns:
|
||||
fieldnames = fieldnames + extra_columns
|
||||
with path.open("w", encoding="utf-8", newline="") as fh:
|
||||
writer = csv.DictWriter(fh, fieldnames=fieldnames)
|
||||
writer.writeheader()
|
||||
for r in rows:
|
||||
writer.writerow({k: r.get(k, "") for k in fieldnames})
|
||||
|
||||
|
||||
def _data_lines(md_text: str) -> list[str]:
|
||||
header_prefix = "| " + MD_COLUMNS[0] + " | " + MD_COLUMNS[1]
|
||||
return [
|
||||
ln
|
||||
for ln in md_text.splitlines()
|
||||
if ln.startswith("|")
|
||||
and not ln.startswith(header_prefix)
|
||||
and not ln.startswith("|---")
|
||||
]
|
||||
|
||||
|
||||
def test_empty_csv_placeholder(tmp_path: Path) -> None:
|
||||
csv_p = tmp_path / "jurnal.csv"
|
||||
md_p = tmp_path / "jurnal.md"
|
||||
_write_csv(csv_p, [])
|
||||
|
||||
n = regenerate_md(csv_p, md_p)
|
||||
|
||||
assert n == 0
|
||||
content = md_p.read_text(encoding="utf-8")
|
||||
assert "# Jurnal M2D (auto-generated)" in content
|
||||
assert "Niciun trade încă" in content
|
||||
assert "| # |" not in content
|
||||
|
||||
|
||||
def test_missing_csv_placeholder(tmp_path: Path) -> None:
|
||||
csv_p = tmp_path / "does_not_exist.csv"
|
||||
md_p = tmp_path / "jurnal.md"
|
||||
|
||||
n = regenerate_md(csv_p, md_p)
|
||||
|
||||
assert n == 0
|
||||
content = md_p.read_text(encoding="utf-8")
|
||||
assert "Niciun trade încă" in content
|
||||
assert md_p.exists()
|
||||
|
||||
|
||||
def test_single_row_format(tmp_path: Path) -> None:
|
||||
csv_p = tmp_path / "jurnal.csv"
|
||||
md_p = tmp_path / "jurnal.md"
|
||||
_write_csv(csv_p, [_row()])
|
||||
|
||||
n = regenerate_md(csv_p, md_p)
|
||||
|
||||
assert n == 1
|
||||
content = md_p.read_text(encoding="utf-8")
|
||||
assert "# Jurnal M2D (auto-generated from data/jurnal.csv)" in content
|
||||
assert "Rows: 1" in content
|
||||
header_line = "| " + " | ".join(MD_COLUMNS) + " |"
|
||||
assert header_line in content
|
||||
rows = _data_lines(content)
|
||||
assert len(rows) == 1
|
||||
cells = [c.strip() for c in rows[0].strip("|").split("|")]
|
||||
assert cells[0] == "1"
|
||||
assert cells[1] == "2026-05-13"
|
||||
assert cells[2] == "Mi"
|
||||
assert cells[3] == "17:23"
|
||||
assert cells[4] == "A2"
|
||||
assert cells[5] == "DIA"
|
||||
assert cells[6] == "Buy"
|
||||
assert cells[7] == "Clară"
|
||||
assert cells[13] == "TP0→TP1"
|
||||
assert cells[14] == "+0.50"
|
||||
assert cells[15] == "+0.33"
|
||||
assert cells[16] == "vision"
|
||||
|
||||
|
||||
def test_three_rows(tmp_path: Path) -> None:
|
||||
csv_p = tmp_path / "jurnal.csv"
|
||||
md_p = tmp_path / "jurnal.md"
|
||||
rows = [
|
||||
_row(id="3", data="2026-05-15", pl_marius="-1.0000"),
|
||||
_row(id="1", data="2026-05-13"),
|
||||
_row(id="2", data="2026-05-14", pl_marius="0.2000"),
|
||||
]
|
||||
_write_csv(csv_p, rows)
|
||||
|
||||
n = regenerate_md(csv_p, md_p)
|
||||
|
||||
assert n == 3
|
||||
content = md_p.read_text(encoding="utf-8")
|
||||
assert "Rows: 3" in content
|
||||
data = _data_lines(content)
|
||||
assert len(data) == 3
|
||||
assert "| 1 | 2026-05-13 |" in data[0]
|
||||
assert "| 2 | 2026-05-14 |" in data[1]
|
||||
assert "| 3 | 2026-05-15 |" in data[2]
|
||||
|
||||
|
||||
def test_pending_pl_displayed(tmp_path: Path) -> None:
|
||||
csv_p = tmp_path / "jurnal.csv"
|
||||
md_p = tmp_path / "jurnal.md"
|
||||
_write_csv(csv_p, [_row(pl_marius="", pl_theoretical="")])
|
||||
|
||||
n = regenerate_md(csv_p, md_p)
|
||||
|
||||
assert n == 1
|
||||
content = md_p.read_text(encoding="utf-8")
|
||||
rows = _data_lines(content)
|
||||
cells = [c.strip() for c in rows[0].strip("|").split("|")]
|
||||
assert cells[14] == "pending"
|
||||
assert cells[15] == "pending"
|
||||
|
||||
|
||||
def test_unknown_column_graceful(
|
||||
tmp_path: Path, capsys: pytest.CaptureFixture[str]
|
||||
) -> None:
|
||||
csv_p = tmp_path / "jurnal.csv"
|
||||
md_p = tmp_path / "jurnal.md"
|
||||
_write_csv(csv_p, [_row()], extra_columns=["extra_field"])
|
||||
|
||||
n = regenerate_md(csv_p, md_p)
|
||||
|
||||
assert n == 1
|
||||
content = md_p.read_text(encoding="utf-8")
|
||||
assert "Rows: 1" in content
|
||||
captured = capsys.readouterr()
|
||||
assert "unknown CSV columns ignored" in captured.err
|
||||
assert "extra_field" in captured.err
|
||||
|
||||
|
||||
def test_atomic_write_no_tmp_leftover(tmp_path: Path) -> None:
|
||||
csv_p = tmp_path / "jurnal.csv"
|
||||
md_p = tmp_path / "jurnal.md"
|
||||
_write_csv(csv_p, [_row()])
|
||||
|
||||
regenerate_md(csv_p, md_p)
|
||||
|
||||
leftovers = list(tmp_path.glob("*.tmp"))
|
||||
assert leftovers == []
|
||||
assert md_p.exists()
|
||||
|
||||
|
||||
def test_rows_count_returned(tmp_path: Path) -> None:
|
||||
csv_p = tmp_path / "jurnal.csv"
|
||||
md_p = tmp_path / "jurnal.md"
|
||||
_write_csv(csv_p, [_row(id=str(i)) for i in range(1, 6)])
|
||||
|
||||
n = regenerate_md(csv_p, md_p)
|
||||
|
||||
assert n == 5
|
||||
469
tests/test_stats.py
Normal file
469
tests/test_stats.py
Normal file
@@ -0,0 +1,469 @@
|
||||
"""Tests for scripts/stats.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import csv
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
|
||||
|
||||
from scripts.append_row import CSV_COLUMNS # noqa: E402
|
||||
from scripts.stats import ( # noqa: E402
|
||||
BACKTEST_SOURCES,
|
||||
CORE_CALIBRATION_FIELDS,
|
||||
bootstrap_ci,
|
||||
calibration_mismatch,
|
||||
compute_group_stats,
|
||||
expectancy,
|
||||
format_calibration_report,
|
||||
format_report,
|
||||
group_by,
|
||||
load_trades,
|
||||
main,
|
||||
win_rate,
|
||||
wilson_ci,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Synthetic CSV fixture: 30 trades
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _base_row(**overrides) -> dict[str, str]:
|
||||
base = {
|
||||
"id": "0",
|
||||
"screenshot_file": "",
|
||||
"source": "vision",
|
||||
"data": "2026-05-13",
|
||||
"zi": "Mi",
|
||||
"ora_ro": "17:30",
|
||||
"ora_utc": "14:30",
|
||||
"instrument": "DIA",
|
||||
"directie": "Buy",
|
||||
"tf_mare": "5min",
|
||||
"tf_mic": "1min",
|
||||
"calitate": "Clară",
|
||||
"entry": "400.0",
|
||||
"sl": "399.0",
|
||||
"tp0": "400.5",
|
||||
"tp1": "401.0",
|
||||
"tp2": "402.0",
|
||||
"risc_pct": "0.25",
|
||||
"outcome_path": "TP0→TP1",
|
||||
"max_reached": "TP1",
|
||||
"be_moved": "True",
|
||||
"pl_marius": "0.5000",
|
||||
"pl_theoretical": "0.3330",
|
||||
"set": "A2",
|
||||
"indicator_version": "v-2026-05",
|
||||
"pl_overlay_version": "marius-v1",
|
||||
"csv_schema_version": "1",
|
||||
"extracted_at": "2026-05-13T10:00:00Z",
|
||||
"note": "",
|
||||
}
|
||||
base.update({k: str(v) for k, v in overrides.items()})
|
||||
return base
|
||||
|
||||
|
||||
def _write_csv(path: Path, rows: list[dict[str, str]]) -> None:
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
with path.open("w", encoding="utf-8", newline="") as fh:
|
||||
w = csv.DictWriter(fh, fieldnames=list(CSV_COLUMNS))
|
||||
w.writeheader()
|
||||
for r in rows:
|
||||
w.writerow({k: r.get(k, "") for k in CSV_COLUMNS})
|
||||
|
||||
|
||||
def _synthetic_30(tmp_path: Path) -> Path:
|
||||
"""30 vision-source trades engineered for known stats.
|
||||
|
||||
Layout (by Set):
|
||||
- A1: 10 trades — 6 wins TP0->TP1 (+0.5), 4 losses SL (-1.0) → WR 60%
|
||||
- A2: 10 trades — 7 wins TP0->TP2 (+0.5), 3 losses SL (-1.0) → WR 70%
|
||||
- A3: 10 trades — 4 wins TP0->TP1 (+0.5), 6 losses SL (-1.0) → WR 40%
|
||||
|
||||
Overall: 17 wins / 30, WR ≈ 56.67%.
|
||||
"""
|
||||
rows: list[dict[str, str]] = []
|
||||
rid = 0
|
||||
|
||||
def add(set_label: str, n_win: int, n_loss: int, calitate: str = "Clară") -> None:
|
||||
nonlocal rid
|
||||
for _ in range(n_win):
|
||||
rid += 1
|
||||
rows.append(
|
||||
_base_row(
|
||||
id=rid,
|
||||
screenshot_file=f"win-{rid}.png",
|
||||
set=set_label,
|
||||
calitate=calitate,
|
||||
outcome_path="TP0→TP1",
|
||||
max_reached="TP1",
|
||||
be_moved="True",
|
||||
pl_marius="0.5000",
|
||||
pl_theoretical="0.3330",
|
||||
)
|
||||
)
|
||||
for _ in range(n_loss):
|
||||
rid += 1
|
||||
rows.append(
|
||||
_base_row(
|
||||
id=rid,
|
||||
screenshot_file=f"loss-{rid}.png",
|
||||
set=set_label,
|
||||
calitate=calitate,
|
||||
outcome_path="SL",
|
||||
max_reached="SL_first",
|
||||
be_moved="False",
|
||||
pl_marius="-1.0000",
|
||||
pl_theoretical="-1.0000",
|
||||
)
|
||||
)
|
||||
|
||||
add("A1", 6, 4)
|
||||
add("A2", 7, 3)
|
||||
add("A3", 4, 6)
|
||||
|
||||
path = tmp_path / "jurnal.csv"
|
||||
_write_csv(path, rows)
|
||||
return path
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Wilson CI — reference values
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestWilsonCI:
|
||||
def test_n_zero(self) -> None:
|
||||
assert wilson_ci(0, 0) == (0.0, 0.0)
|
||||
|
||||
def test_50pct_at_n40(self) -> None:
|
||||
lo, hi = wilson_ci(20, 40)
|
||||
assert lo == pytest.approx(0.3519927879709976, abs=1e-9)
|
||||
assert hi == pytest.approx(0.6480072120290024, abs=1e-9)
|
||||
|
||||
def test_55pct_at_n40(self) -> None:
|
||||
lo, hi = wilson_ci(22, 40)
|
||||
assert lo == pytest.approx(0.3982882988844078, abs=1e-9)
|
||||
assert hi == pytest.approx(0.6929492471905531, abs=1e-9)
|
||||
|
||||
def test_55pct_at_n100(self) -> None:
|
||||
# Larger N tightens the CI; lower bound rises above 45%.
|
||||
lo, hi = wilson_ci(55, 100)
|
||||
assert lo == pytest.approx(0.4524442703164345, abs=1e-9)
|
||||
assert hi == pytest.approx(0.6438562489359655, abs=1e-9)
|
||||
assert lo > 0.45 # STOPPING_RULE GO-LIVE gate
|
||||
|
||||
def test_zero_wins(self) -> None:
|
||||
lo, hi = wilson_ci(0, 10)
|
||||
assert lo == pytest.approx(0.0, abs=1e-12)
|
||||
assert hi == pytest.approx(0.2775401687666165, abs=1e-9)
|
||||
|
||||
def test_all_wins(self) -> None:
|
||||
lo, hi = wilson_ci(10, 10)
|
||||
assert lo == pytest.approx(0.7224598312333834, abs=1e-9)
|
||||
assert hi == pytest.approx(1.0, abs=1e-12)
|
||||
|
||||
def test_wins_out_of_range(self) -> None:
|
||||
with pytest.raises(ValueError):
|
||||
wilson_ci(11, 10)
|
||||
with pytest.raises(ValueError):
|
||||
wilson_ci(-1, 10)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Bootstrap CI — determinism + sanity
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestBootstrapCI:
    """Determinism and basic sanity checks for the bootstrap interval."""

    def test_deterministic_with_seed(self) -> None:
        sample = [0.5, -1.0, 0.5, 0.5, -1.0, 0.2, -0.3, 0.5, -1.0, 0.5]
        first = bootstrap_ci(sample, iterations=500, seed=42)
        repeat = bootstrap_ci(sample, iterations=500, seed=42)
        # Same seed and iteration count must reproduce the interval exactly.
        assert first == repeat

    def test_different_seed_different_result(self) -> None:
        sample = [0.5, -1.0, 0.5, 0.5, -1.0, 0.2, -0.3, 0.5, -1.0, 0.5]
        assert bootstrap_ci(sample, iterations=500, seed=1) != bootstrap_ci(
            sample, iterations=500, seed=2
        )

    def test_brackets_the_mean(self) -> None:
        sample = [0.5, -1.0, 0.5, 0.5, -1.0, 0.2, -0.3, 0.5, -1.0, 0.5] * 5
        sample_mean = sum(sample) / len(sample)
        lower, upper = bootstrap_ci(sample, iterations=1000, seed=7)
        assert lower <= sample_mean <= upper

    def test_empty_input(self) -> None:
        # No data yields a degenerate (0.0, 0.0) interval, not an error.
        assert bootstrap_ci([], iterations=100, seed=0) == (0.0, 0.0)

    def test_single_value(self) -> None:
        # A single observation has no resampling variance; the
        # implementation short-circuits to (mean, mean).
        lower, upper = bootstrap_ci([0.5], iterations=100, seed=0)
        assert lower == pytest.approx(0.5)
        assert upper == pytest.approx(0.5)


# ---------------------------------------------------------------------------
# Loading + group stats on the 30-trade fixture
# ---------------------------------------------------------------------------


class TestSyntheticFixture:
    """Loading and group statistics over the deterministic 30-trade fixture."""

    def test_load_30(self, tmp_path: Path) -> None:
        loaded = load_trades(_synthetic_30(tmp_path))
        assert len(loaded) == 30
        assert all(t.source == "vision" for t in loaded)

    def test_overall_wr(self, tmp_path: Path) -> None:
        wins, resolved, rate = win_rate(load_trades(_synthetic_30(tmp_path)))
        assert (wins, resolved) == (17, 30)
        assert rate == pytest.approx(17 / 30)

    def test_overall_expectancy(self, tmp_path: Path) -> None:
        fixture = load_trades(_synthetic_30(tmp_path))
        # 17 wins at +0.5 plus 13 losses at -1.0 = -4.5 total → mean -0.15.
        assert expectancy(fixture) == pytest.approx(-0.15, abs=1e-9)

    def test_per_set_wr(self, tmp_path: Path) -> None:
        grouped = group_by(load_trades(_synthetic_30(tmp_path)), "set")
        for label, expected_wr in (("A1", 0.60), ("A2", 0.70), ("A3", 0.40)):
            assert win_rate(grouped[label])[2] == pytest.approx(expected_wr)

    def test_group_stats_a2(self, tmp_path: Path) -> None:
        a2_trades = [
            t for t in load_trades(_synthetic_30(tmp_path)) if t.set == "A2"
        ]
        stats = compute_group_stats(
            a2_trades, label="A2", bootstrap_iterations=500, seed=11
        )
        assert stats.n_total == 10
        assert stats.n_resolved == 10
        assert stats.wins == 7
        assert stats.wr == pytest.approx(0.70)
        # Exact Wilson bounds for 7/10.
        assert stats.wr_ci_lo == pytest.approx(0.3967732199795652, abs=1e-9)
        assert stats.wr_ci_hi == pytest.approx(0.892210712513788, abs=1e-9)
        # A2 expectancy: 7 * 0.5 + 3 * (-1.0) = 0.5 over 10 trades → 0.05.
        assert stats.exp_marius == pytest.approx(0.05, abs=1e-9)
        assert stats.exp_marius_ci_lo <= stats.exp_marius <= stats.exp_marius_ci_hi


# ---------------------------------------------------------------------------
# Pending-trade handling
# ---------------------------------------------------------------------------


class TestPendingHandling:
    """Unresolved ('pending') rows must not pollute win-rate or expectancy."""

    def test_pending_excluded_from_wr(self, tmp_path: Path) -> None:
        resolved_win = _base_row(
            id=1, screenshot_file="a.png",
            outcome_path="TP0→TP1", max_reached="TP1",
            be_moved="True", pl_marius="0.5000", pl_theoretical="0.3330",
        )
        still_open = _base_row(
            id=2, screenshot_file="b.png",
            outcome_path="pending", max_reached="TP0",
            be_moved="False", pl_marius="", pl_theoretical="0.1330",
        )
        resolved_loss = _base_row(
            id=3, screenshot_file="c.png",
            outcome_path="SL", max_reached="SL_first",
            be_moved="False", pl_marius="-1.0000", pl_theoretical="-1.0000",
        )
        csv_path = tmp_path / "j.csv"
        _write_csv(csv_path, [resolved_win, still_open, resolved_loss])
        trades = load_trades(csv_path)

        wins, resolved, rate = win_rate(trades)
        assert wins == 1
        # Only rows 1 and 3 count; the pending row is excluded.
        assert resolved == 2
        assert rate == pytest.approx(0.5)
        # pl_marius mean over resolved rows only: (0.5 + -1.0) / 2 = -0.25.
        assert expectancy(trades, "pl_marius") == pytest.approx(-0.25)


# ---------------------------------------------------------------------------
# Source filtering: calibration rows excluded from main report
# ---------------------------------------------------------------------------


class TestSourceFiltering:
    """Calibration legs must never leak into the headline backtest numbers."""

    def test_calibration_rows_excluded_from_backtest_stats(
        self, tmp_path: Path
    ) -> None:
        all_rows = [
            _base_row(id=1, source="vision", screenshot_file="v.png",
                      pl_marius="0.5000"),
            _base_row(id=2, source="manual", screenshot_file="m.png",
                      pl_marius="0.5000"),
            _base_row(id=3, source="manual_calibration", screenshot_file="c.png",
                      pl_marius="-1.0000"),
            _base_row(id=4, source="vision_calibration", screenshot_file="c.png",
                      pl_marius="-1.0000"),
        ]
        csv_path = tmp_path / "j.csv"
        _write_csv(csv_path, all_rows)
        backtest = [
            t for t in load_trades(csv_path) if t.source in BACKTEST_SOURCES
        ]
        # Only the two non-calibration rows survive the filter — both wins.
        assert len(backtest) == 2
        wins, resolved, rate = win_rate(backtest)
        assert (wins, resolved) == (2, 2)
        assert rate == pytest.approx(1.0)


# ---------------------------------------------------------------------------
# Calibration mode: pairing + mismatch
# ---------------------------------------------------------------------------


class TestCalibration:
    """Pairing of manual/vision calibration legs and mismatch accounting."""

    def test_pairs_and_zero_mismatch(self, tmp_path: Path) -> None:
        manual = _base_row(
            id=1, source="manual_calibration", screenshot_file="cal-1.png"
        )
        vision = _base_row(
            id=2, source="vision_calibration", screenshot_file="cal-1.png"
        )
        csv_path = tmp_path / "j.csv"
        _write_csv(csv_path, [manual, vision])
        report = calibration_mismatch(load_trades(csv_path))
        assert report.pairs == 1
        assert sum(report.field_mismatches.values()) == 0
        assert report.overall_mismatch_rate == 0.0

    def test_one_field_mismatch(self, tmp_path: Path) -> None:
        manual = _base_row(
            id=1, source="manual_calibration", screenshot_file="cal-1.png",
            entry="400.0",
        )
        vision = _base_row(
            id=2, source="vision_calibration", screenshot_file="cal-1.png",
            entry="400.10",  # disagrees with the manual leg
        )
        csv_path = tmp_path / "j.csv"
        _write_csv(csv_path, [manual, vision])
        report = calibration_mismatch(load_trades(csv_path))
        assert report.pairs == 1
        assert report.field_mismatches["entry"] == 1
        # Every core field other than entry agrees across the pair.
        for field_name in CORE_CALIBRATION_FIELDS:
            if field_name == "entry":
                continue
            assert report.field_mismatches[field_name] == 0
        # One mismatch over one pair of core fields (1/8 = 12.5%).
        assert report.overall_mismatch_rate == pytest.approx(
            1.0 / len(CORE_CALIBRATION_FIELDS)
        )

    def test_unpaired_rows_ignored(self, tmp_path: Path) -> None:
        # A manual leg with no vision counterpart contributes no pair.
        lonely = _base_row(
            id=1, source="manual_calibration", screenshot_file="lonely.png"
        )
        csv_path = tmp_path / "j.csv"
        _write_csv(csv_path, [lonely])
        report = calibration_mismatch(load_trades(csv_path))
        assert report.pairs == 0
        assert report.total_comparisons == 0
        assert report.overall_mismatch_rate == 0.0

    def test_numeric_equivalence_tolerated(self, tmp_path: Path) -> None:
        """'400' and '400.0000' should NOT count as a mismatch on entry."""
        manual = _base_row(
            id=1, source="manual_calibration", screenshot_file="cal-1.png",
            entry="400",
        )
        vision = _base_row(
            id=2, source="vision_calibration", screenshot_file="cal-1.png",
            entry="400.0000",
        )
        csv_path = tmp_path / "j.csv"
        _write_csv(csv_path, [manual, vision])
        report = calibration_mismatch(load_trades(csv_path))
        assert report.field_mismatches["entry"] == 0


# ---------------------------------------------------------------------------
# Report formatting + CLI
# ---------------------------------------------------------------------------


class TestReporting:
    """Human-readable report rendering plus the CLI entry point."""

    def test_format_report_contains_sections(self, tmp_path: Path) -> None:
        report = format_report(
            load_trades(_synthetic_30(tmp_path)),
            bootstrap_iterations=200,
            seed=0,
        )
        for heading in ("M2D Backtest Stats", "Overall", "By Set"):
            assert heading in report
        for set_label in ("A1", "A2", "A3"):
            assert set_label in report
        # The data-quality caveat must appear somewhere in the output.
        lowered = report.lower()
        assert "descriptor only" in lowered or "biased" in lowered

    def test_format_calibration_report(self, tmp_path: Path) -> None:
        legs = [
            _base_row(
                id=1, source="manual_calibration", screenshot_file="cal-1.png"
            ),
            _base_row(
                id=2, source="vision_calibration", screenshot_file="cal-1.png",
                directie="Sell",  # mismatch on directie
                entry="400.0", sl="401.0", tp0="399.5", tp1="399.0", tp2="398.0",
            ),
        ]
        csv_path = tmp_path / "j.csv"
        _write_csv(csv_path, legs)
        report = format_calibration_report(load_trades(csv_path))
        assert "Paired screenshots" in report
        assert "directie" in report
        # One mismatched field (directie) of 8 = 12.5% → breaches the P4 gate.
        assert "FAIL" in report

    def test_empty_csv_report(self, tmp_path: Path) -> None:
        empty_path = tmp_path / "empty.csv"
        _write_csv(empty_path, [])
        report = format_report(load_trades(empty_path))
        assert "no backtest trades" in report.lower()

    def test_main_cli_runs(
        self, tmp_path: Path, capsys: pytest.CaptureFixture
    ) -> None:
        fixture_path = _synthetic_30(tmp_path)
        exit_code = main(
            ["--csv", str(fixture_path), "--seed", "0", "--bootstrap-iterations", "100"]
        )
        assert exit_code == 0
        assert "M2D Backtest Stats" in capsys.readouterr().out

    def test_main_cli_calibration(
        self, tmp_path: Path, capsys: pytest.CaptureFixture
    ) -> None:
        legs = [
            _base_row(id=1, source="manual_calibration", screenshot_file="cal-1.png"),
            _base_row(id=2, source="vision_calibration", screenshot_file="cal-1.png"),
        ]
        csv_path = tmp_path / "j.csv"
        _write_csv(csv_path, legs)
        assert main(["--csv", str(csv_path), "--calibration"]) == 0
        output = capsys.readouterr().out
        assert "Calibration P4 gate" in output
        # Both legs agree on every field, so the gate passes.
        assert "PASS" in output