Complete testing system: pyproject.toml (pytest markers), test.sh orchestrator with auto app start/stop and colorful summary, pre-push hook, Gitea Actions workflow. New QA tests: API health (7 endpoints), responsive (3 viewports), log monitoring (ERROR/ORA-/Traceback detection), real GoMag sync, PL/SQL package validation, smoke prod (read-only). Converted test_app_basic.py and test_integration.py to pytest. Added pytestmark to all existing tests (unit/e2e/oracle). E2E conftest upgraded: console error collector, screenshot on failure, auto-detect live app on :5003. Usage: ./test.sh ci (30s) | ./test.sh full (2-3min) Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
246 lines
7.7 KiB
Python
246 lines
7.7 KiB
Python
"""
|
|
QA Report Generator — called by conftest.py's pytest_sessionfinish hook.
|
|
"""
|
|
import json
|
|
import os
|
|
import smtplib
|
|
from datetime import date
|
|
from email.mime.text import MIMEText
|
|
from pathlib import Path
|
|
|
|
|
|
# Report categories: ``weight`` values sum to 1.0 and weight the overall
# health score; ``patterns`` are substrings matched against test nodeids/names
# (Console/Functional/Navigation are special-cased in _match_category, so
# their pattern lists are effectively informational).
CATEGORIES = {
    "Console":     {"weight": 0.10, "patterns": ["e2e/"]},
    "Navigation":  {"weight": 0.10, "patterns": ["test_page_load", "test_", "_loads"]},
    "Functional":  {"weight": 0.15, "patterns": ["e2e/"]},
    "API":         {"weight": 0.15, "patterns": ["test_qa_api", "test_api_"]},
    "Responsive":  {"weight": 0.10, "patterns": ["test_qa_responsive", "responsive"]},
    "Performance": {"weight": 0.10, "patterns": ["response_time"]},
    "Logs":        {"weight": 0.15, "patterns": ["test_qa_logs", "log_monitor"]},
    "Sync/Oracle": {"weight": 0.15, "patterns": ["sync", "plsql", "oracle"]},
}
|
|
|
|
|
|
def _match_category(nodeid: str, name: str, category: str, patterns: list) -> bool:
|
|
"""Check if a test belongs to a category based on patterns."""
|
|
nodeid_lower = nodeid.lower()
|
|
name_lower = name.lower()
|
|
|
|
if category == "Console":
|
|
return "e2e/" in nodeid_lower
|
|
elif category == "Functional":
|
|
return "e2e/" in nodeid_lower
|
|
elif category == "Navigation":
|
|
return "test_page_load" in name_lower or name_lower.endswith("_loads")
|
|
else:
|
|
for p in patterns:
|
|
if p in nodeid_lower or p in name_lower:
|
|
return True
|
|
return False
|
|
|
|
|
|
def _collect_results(session):
|
|
"""Return list of (nodeid, name, passed, failed, error_msg) for each test."""
|
|
results = []
|
|
for item in session.items:
|
|
nodeid = item.nodeid
|
|
name = item.name
|
|
passed = False
|
|
failed = False
|
|
error_msg = ""
|
|
rep = getattr(item, "rep_call", None)
|
|
if rep is None:
|
|
# try stash
|
|
try:
|
|
rep = item.stash.get(item.config._store, None)
|
|
except Exception:
|
|
pass
|
|
if rep is not None:
|
|
passed = getattr(rep, "passed", False)
|
|
failed = getattr(rep, "failed", False)
|
|
if failed:
|
|
try:
|
|
error_msg = str(rep.longrepr).split("\n")[-1][:200]
|
|
except Exception:
|
|
error_msg = "unknown error"
|
|
results.append((nodeid, name, passed, failed, error_msg))
|
|
return results
|
|
|
|
|
|
def _categorize(results):
    """Group test results into CATEGORIES and compute per-category stats.

    Returns ``{category: {"weight", "passed", "total", "score"}}``. A single
    test may count toward several categories; categories with no matching
    tests keep the default score of 100.0.
    """
    stats = {
        cat: {"weight": cfg["weight"], "passed": 0, "total": 0, "score": 100.0}
        for cat, cfg in CATEGORIES.items()
    }

    for nodeid, name, passed, *_ in results:
        for cat, cfg in CATEGORIES.items():
            if _match_category(nodeid, name, cat, cfg["patterns"]):
                stats[cat]["total"] += 1
                if passed:
                    stats[cat]["passed"] += 1

    # Convert counts into a 0-100 pass rate where any tests matched.
    for s in stats.values():
        if s["total"] > 0:
            s["score"] = (s["passed"] / s["total"]) * 100.0

    return stats
|
|
|
|
|
|
def _compute_health(cat_stats) -> float:
|
|
total = sum(
|
|
(s["score"] / 100.0) * s["weight"] for s in cat_stats.values()
|
|
)
|
|
return round(total * 100, 1)
|
|
|
|
|
|
def _load_baseline(reports_dir: Path):
|
|
baseline_path = reports_dir / "baseline.json"
|
|
if not baseline_path.exists():
|
|
return None
|
|
try:
|
|
with open(baseline_path) as f:
|
|
data = json.load(f)
|
|
# validate minimal keys
|
|
_ = data["health_score"], data["date"]
|
|
return data
|
|
except Exception:
|
|
baseline_path.unlink(missing_ok=True)
|
|
return None
|
|
|
|
|
|
def _save_baseline(reports_dir: Path, health_score, passed, failed, cat_stats):
|
|
baseline_path = reports_dir / "baseline.json"
|
|
try:
|
|
data = {
|
|
"health_score": health_score,
|
|
"date": str(date.today()),
|
|
"passed": passed,
|
|
"failed": failed,
|
|
"categories": {
|
|
cat: {"score": s["score"], "passed": s["passed"], "total": s["total"]}
|
|
for cat, s in cat_stats.items()
|
|
},
|
|
}
|
|
with open(baseline_path, "w") as f:
|
|
json.dump(data, f, indent=2)
|
|
except Exception:
|
|
pass
|
|
|
|
|
|
def _delta_str(health_score, baseline) -> str:
|
|
if baseline is None:
|
|
return ""
|
|
prev = baseline.get("health_score", health_score)
|
|
diff = round(health_score - prev, 1)
|
|
sign = "+" if diff >= 0 else ""
|
|
return f" (baseline: {prev}, {sign}{diff})"
|
|
|
|
|
|
def _build_markdown(health_score, delta, cat_stats, failed_tests, today_str) -> str:
|
|
lines = [
|
|
f"# QA Report — {today_str}",
|
|
"",
|
|
f"## Health Score: {health_score}/100{delta}",
|
|
"",
|
|
"| Category | Score | Weight | Tests |",
|
|
"|----------|-------|--------|-------|",
|
|
]
|
|
|
|
for cat, s in cat_stats.items():
|
|
score_pct = f"{s['score']:.0f}%"
|
|
weight_pct = f"{int(s['weight'] * 100)}%"
|
|
tests_str = f"{s['passed']}/{s['total']} passed" if s["total"] > 0 else "no tests"
|
|
lines.append(f"| {cat} | {score_pct} | {weight_pct} | {tests_str} |")
|
|
|
|
lines += ["", "## Failed Tests"]
|
|
if failed_tests:
|
|
for name, msg in failed_tests:
|
|
lines.append(f"- `{name}`: {msg}")
|
|
else:
|
|
lines.append("_No failed tests._")
|
|
|
|
lines += ["", "## Warnings"]
|
|
if health_score < 70:
|
|
lines.append("- Health score below 70 — review failures before deploy.")
|
|
|
|
return "\n".join(lines) + "\n"
|
|
|
|
|
|
def _send_email(health_score, report_path):
|
|
smtp_host = os.environ.get("SMTP_HOST")
|
|
if not smtp_host:
|
|
return
|
|
try:
|
|
smtp_port = int(os.environ.get("SMTP_PORT", 587))
|
|
smtp_user = os.environ.get("SMTP_USER", "")
|
|
smtp_pass = os.environ.get("SMTP_PASSWORD", "")
|
|
smtp_to = os.environ.get("SMTP_TO", smtp_user)
|
|
|
|
subject = f"QA Alert: Health Score {health_score}/100"
|
|
body = f"Health score dropped to {health_score}/100.\nReport: {report_path}"
|
|
|
|
msg = MIMEText(body)
|
|
msg["Subject"] = subject
|
|
msg["From"] = smtp_user
|
|
msg["To"] = smtp_to
|
|
|
|
with smtplib.SMTP(smtp_host, smtp_port) as server:
|
|
server.ehlo()
|
|
server.starttls()
|
|
if smtp_user:
|
|
server.login(smtp_user, smtp_pass)
|
|
server.sendmail(smtp_user, [smtp_to], msg.as_string())
|
|
except Exception:
|
|
pass
|
|
|
|
|
|
def generate(session, reports_dir: Path):
    """Generate the QA health report for a finished pytest session.

    Called from conftest.py's ``pytest_sessionfinish`` hook. Writes a
    dated Markdown report and ``baseline.json`` into *reports_dir*, prints
    a summary banner, and sends an e-mail alert when the health score
    drops below 70.

    Best-effort by design: failures are printed but never propagated, so
    report generation can never break the test session itself. (Previously
    all errors were swallowed silently, which made reporting failures
    impossible to diagnose.)
    """
    try:
        reports_dir = Path(reports_dir)
        reports_dir.mkdir(parents=True, exist_ok=True)

        results = _collect_results(session)

        # results rows: (nodeid, name, passed, failed, error_msg)
        passed_count = sum(1 for r in results if r[2])
        failed_count = sum(1 for r in results if r[3])
        failed_tests = [(r[1], r[4]) for r in results if r[3]]

        cat_stats = _categorize(results)
        health_score = _compute_health(cat_stats)

        baseline = _load_baseline(reports_dir)
        delta = _delta_str(health_score, baseline)

        today_str = str(date.today())
        report_path = reports_dir / f"qa-report-{today_str}.md"

        md = _build_markdown(health_score, delta, cat_stats, failed_tests, today_str)

        try:
            report_path.write_text(md)
        except OSError as exc:
            # Keep going: the baseline and console summary are still useful.
            print(f"QA report: could not write {report_path}: {exc}")

        _save_baseline(reports_dir, health_score, passed_count, failed_count, cat_stats)

        if health_score < 70:
            _send_email(health_score, report_path)

        print(f"\n{'═' * 50}")
        print(f" QA HEALTH SCORE: {health_score}/100{delta}")
        print(f" Report: {report_path}")
        print(f"{'═' * 50}\n")

    except Exception as exc:
        # Never let reporting break the session, but say what went wrong.
        print(f"QA report generation failed: {exc}")