288 lines
9.9 KiB
Python
288 lines
9.9 KiB
Python
"""Tests for scripts/append_row.py — append_extraction pipeline."""
|
|
|
|
from __future__ import annotations
|
|
|
|
import csv
|
|
import json
|
|
import re
|
|
import sys
|
|
from datetime import datetime
|
|
from pathlib import Path
|
|
|
|
import pytest
|
|
import yaml
|
|
|
|
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
|
|
|
|
from scripts.append_row import ( # noqa: E402
|
|
CSV_COLUMNS,
|
|
VALID_SOURCES,
|
|
ZI_RO_MAP,
|
|
append_extraction,
|
|
csv_columns,
|
|
)
|
|
|
|
|
|
REPO_ROOT = Path(__file__).resolve().parent.parent
|
|
CALENDAR_PATH = REPO_ROOT / "calendar_evenimente.yaml"
|
|
META_PATH = REPO_ROOT / "data" / "_meta.yaml"
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# helpers / fixtures
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
def _buy_payload(**overrides) -> dict:
|
|
# 2026-05-13 14:23 UTC == 17:23 RO (EEST, Wed) → set A2, zi=Mi.
|
|
base = {
|
|
"screenshot_file": "dia-2026-05-13-1.png",
|
|
"data": "2026-05-13",
|
|
"ora_utc": "14:23",
|
|
"instrument": "DIA",
|
|
"directie": "Buy",
|
|
"tf_mare": "5min",
|
|
"tf_mic": "1min",
|
|
"calitate": "Clară",
|
|
"entry": 400.0,
|
|
"sl": 399.0,
|
|
"tp0": 400.5,
|
|
"tp1": 401.0,
|
|
"tp2": 402.0,
|
|
"risc_pct": 0.25,
|
|
"outcome_path": "TP0→TP1",
|
|
"max_reached": "TP1",
|
|
"be_moved": True,
|
|
"confidence": "high",
|
|
"ambiguities": [],
|
|
"note": "",
|
|
}
|
|
base.update(overrides)
|
|
return base
|
|
|
|
|
|
def _write_payload(tmp_path: Path, name: str, **overrides) -> Path:
    """Serialize a payload (defaults + *overrides*) as JSON under tmp_path."""
    target = tmp_path / name
    payload = _buy_payload(**overrides)
    target.write_text(json.dumps(payload), encoding="utf-8")
    return target
|
|
|
|
|
|
def _read_rows(csv_path: Path) -> list[dict[str, str]]:
|
|
with csv_path.open("r", encoding="utf-8", newline="") as fh:
|
|
return list(csv.DictReader(fh))
|
|
|
|
|
|
@pytest.fixture
def csv_path(tmp_path: Path) -> Path:
    """Per-test target CSV location; never pre-created on disk."""
    return tmp_path.joinpath("jurnal.csv")
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# schema / column layout
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
def test_csv_columns_canonical_29() -> None:
    """Canonical schema: exactly 29 columns, id first, note last."""
    cols = csv_columns()
    assert len(cols) == 29
    assert (cols[0], cols[-1]) == ("id", "note")
    assert cols == list(CSV_COLUMNS)
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# core tests as specified in task #9
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
def test_happy_path(tmp_path: Path, csv_path: Path) -> None:
    """A valid payload is accepted and written with all derived fields."""
    payload = _write_payload(tmp_path, "t.json")
    result = append_extraction(payload, "vision", csv_path, META_PATH, CALENDAR_PATH)
    assert result["status"] == "ok", result
    assert result["reason"] == ""
    assert result["id"] == 1

    rows = _read_rows(csv_path)
    assert len(rows) == 1
    row = rows[0]
    # Field-by-field spot check of the written record (incl. derived zi/ora_ro/set).
    expected = {
        "id": "1",
        "screenshot_file": "dia-2026-05-13-1.png",
        "source": "vision",
        "data": "2026-05-13",
        "zi": "Mi",
        "ora_ro": "17:23",
        "ora_utc": "14:23",
        "set": "A2",
        "instrument": "DIA",
        "directie": "Buy",
        "be_moved": "True",
    }
    for key, value in expected.items():
        assert row[key] == value, key
|
|
|
|
|
|
def test_pl_calc_overlay(tmp_path: Path, csv_path: Path) -> None:
    """Default payload (outcome TP0->TP1, max TP1) → pl_marius=0.5, pl_theoretical=0.333."""
    payload = _write_payload(tmp_path, "t.json")
    outcome = append_extraction(payload, "vision", csv_path, META_PATH, CALENDAR_PATH)
    assert outcome["status"] == "ok"
    row = _read_rows(csv_path)[0]
    assert float(row["pl_marius"]) == pytest.approx(0.50)
    assert float(row["pl_theoretical"]) == pytest.approx(0.333)
|
|
|
|
|
|
def test_dedup_same_source(tmp_path: Path, csv_path: Path) -> None:
    """Re-appending the same screenshot from the same source is rejected."""
    payload = _write_payload(tmp_path, "t.json")
    first = append_extraction(payload, "vision", csv_path, META_PATH, CALENDAR_PATH)
    second = append_extraction(payload, "vision", csv_path, META_PATH, CALENDAR_PATH)
    assert first["status"] == "ok"
    assert second["status"] == "rejected"
    assert "duplicate" in second["reason"].lower()
    # Rejected results carry neither an id nor a row.
    assert second["id"] is None
    assert second["row"] is None
    assert len(_read_rows(csv_path)) == 1
|
|
|
|
|
|
def test_dedup_different_source_ok(tmp_path: Path, csv_path: Path) -> None:
    """Same screenshot_file + different source ⇒ both rows accepted."""
    payload = _write_payload(tmp_path, "t.json")
    results = [
        append_extraction(payload, src, csv_path, META_PATH, CALENDAR_PATH)
        for src in ("manual_calibration", "vision_calibration")
    ]
    assert all(r["status"] == "ok" for r in results)
    rows = _read_rows(csv_path)
    assert len(rows) == 2
    assert {r["source"] for r in rows} == {"manual_calibration", "vision_calibration"}
    # Distinct sequential ids.
    assert {r["id"] for r in rows} == {"1", "2"}
|
|
|
|
|
|
def test_invalid_pydantic_rejected(tmp_path: Path, csv_path: Path) -> None:
    """entry == sl is rejected by pydantic; no CSV is written."""
    bad = _write_payload(tmp_path, "bad.json", entry=399.0, sl=399.0)
    outcome = append_extraction(bad, "vision", csv_path, META_PATH, CALENDAR_PATH)
    assert outcome["status"] == "rejected"
    assert "validation" in outcome["reason"].lower()
    assert not csv_path.exists()
|
|
|
|
|
|
def test_missing_json_file(tmp_path: Path, csv_path: Path) -> None:
    """A nonexistent payload path is rejected without creating the CSV."""
    ghost = tmp_path / "ghost.json"
    outcome = append_extraction(ghost, "vision", csv_path, META_PATH, CALENDAR_PATH)
    assert outcome["status"] == "rejected"
    assert "not found" in outcome["reason"].lower()
    assert not csv_path.exists()
|
|
|
|
|
|
def test_id_increments(tmp_path: Path, csv_path: Path) -> None:
    """Ids are assigned sequentially (1, 2, 3) across successive appends."""
    assigned = []
    for stem in ("a", "b", "c"):
        payload = _write_payload(
            tmp_path, f"{stem}.json", screenshot_file=f"{stem}.png"
        )
        outcome = append_extraction(
            payload, "vision", csv_path, META_PATH, CALENDAR_PATH
        )
        assert outcome["status"] == "ok"
        assigned.append(outcome["id"])
    assert assigned == [1, 2, 3]
    # Ids in the file mirror the ids returned by append_extraction.
    assert [int(r["id"]) for r in _read_rows(csv_path)] == [1, 2, 3]
|
|
|
|
|
|
def test_set_a2(tmp_path: Path, csv_path: Path) -> None:
    """Wed 2026-05-13 14:30 UTC → 17:30 RO → A2 sweet spot."""
    payload = _write_payload(tmp_path, "t.json", ora_utc="14:30")
    outcome = append_extraction(payload, "vision", csv_path, META_PATH, CALENDAR_PATH)
    assert outcome["status"] == "ok"
    row = _read_rows(csv_path)[0]
    assert (row["ora_ro"], row["zi"], row["set"]) == ("17:30", "Mi", "A2")
|
|
|
|
|
|
def test_set_c_fomc(tmp_path: Path, csv_path: Path) -> None:
    """2026-04-29 18:35 UTC == 21:35 RO (FOMC Powell Press window) → Set C."""
    overrides = {
        "data": "2026-04-29",
        "ora_utc": "18:35",
        "screenshot_file": "fomc-apr.png",
    }
    payload = _write_payload(tmp_path, "t.json", **overrides)
    outcome = append_extraction(payload, "vision", csv_path, META_PATH, CALENDAR_PATH)
    assert outcome["status"] == "ok"
    row = _read_rows(csv_path)[0]
    assert row["ora_ro"] == "21:35"
    assert row["set"] == "C"
|
|
|
|
|
|
def test_versions_stamped(tmp_path: Path, csv_path: Path) -> None:
    """Each row carries the version stamps recorded in the meta YAML."""
    payload = _write_payload(tmp_path, "t.json")
    append_extraction(payload, "vision", csv_path, META_PATH, CALENDAR_PATH)
    row = _read_rows(csv_path)[0]
    meta = yaml.safe_load(META_PATH.read_text(encoding="utf-8"))
    for field in ("indicator_version", "pl_overlay_version", "csv_schema_version"):
        assert row[field] == str(meta[field]), field
|
|
|
|
|
|
def test_extracted_at_format(tmp_path: Path, csv_path: Path) -> None:
    """extracted_at is stamped as ISO 8601 UTC with a trailing 'Z'."""
    j = _write_payload(tmp_path, "t.json")
    append_extraction(j, "vision", csv_path, META_PATH, CALENDAR_PATH)
    val = _read_rows(csv_path)[0]["extracted_at"]
    # ISO 8601 UTC with trailing 'Z': YYYY-MM-DDTHH:MM:SSZ
    assert re.match(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$", val), val
    # Round-trip through datetime.fromisoformat after dropping the Z.
    parsed = datetime.fromisoformat(val[:-1])
    # extracted_at is a wall-clock timestamp taken at append time, so asserting
    # `year >= 2026` made the whole test time-dependent (it failed whenever the
    # suite ran before 2026). Pin only a loose sanity bound instead.
    assert parsed.year >= 2000
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# additional safety nets
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
def test_invalid_source_rejected(tmp_path: Path, csv_path: Path) -> None:
    """An unknown source label is rejected before anything is written."""
    payload = _write_payload(tmp_path, "t.json")
    outcome = append_extraction(payload, "auto_magic", csv_path, META_PATH, CALENDAR_PATH)
    assert outcome["status"] == "rejected"
    assert "source" in outcome["reason"].lower()
    assert not csv_path.exists()
|
|
|
|
|
|
def test_all_valid_sources_accepted(tmp_path: Path, csv_path: Path) -> None:
    """Every member of VALID_SOURCES passes the source check."""
    for idx, source in enumerate(sorted(VALID_SOURCES)):
        payload = _write_payload(
            tmp_path, f"t{idx}.json", screenshot_file=f"s{idx}.png"
        )
        outcome = append_extraction(payload, source, csv_path, META_PATH, CALENDAR_PATH)
        assert outcome["status"] == "ok", (source, outcome)
    written = {r["source"] for r in _read_rows(csv_path)}
    assert written == set(VALID_SOURCES)
|
|
|
|
|
|
def test_atomic_write_leaves_no_tmp(tmp_path: Path, csv_path: Path) -> None:
    """The atomic-write dance must not leave a *.tmp staging file behind."""
    payload = _write_payload(tmp_path, "t.json")
    append_extraction(payload, "vision", csv_path, META_PATH, CALENDAR_PATH)
    stale = [p for p in csv_path.parent.iterdir() if p.name.endswith(".tmp")]
    assert stale == []
|
|
|
|
|
|
def test_zi_ro_map_covers_all_weekdays() -> None:
    """Internal sanity: the Romanian-day map covers all 7 short weekday names."""
    expected_keys = {"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"}
    expected_values = {"Lu", "Ma", "Mi", "Jo", "Vi", "Sa", "Du"}
    assert set(ZI_RO_MAP) == expected_keys
    assert set(ZI_RO_MAP.values()) == expected_values
|
|
|
|
|
|
def test_malformed_json_rejected(tmp_path: Path, csv_path: Path) -> None:
    """Unparseable JSON is rejected and leaves no CSV behind."""
    broken = tmp_path / "broken.json"
    broken.write_text("{not valid json", encoding="utf-8")
    outcome = append_extraction(broken, "vision", csv_path, META_PATH, CALENDAR_PATH)
    assert outcome["status"] == "rejected"
    reason = outcome["reason"].lower()
    assert "validation" in reason or "json" in reason
    assert not csv_path.exists()
|