Compare commits
8 Commits
094c6be5a9
...
4b494eb2f2
| Author | SHA1 | Date | |
|---|---|---|---|
| 4b494eb2f2 | |||
| 36a38a1e26 | |||
| deb86c705f | |||
| 51e56af557 | |||
| 655ed3ae09 | |||
| e06a79d98c | |||
| b95395ec2c | |||
| 86384b38e3 |
@@ -269,9 +269,9 @@
|
||||
"prompt": "Heartbeat check. Rulează src/heartbeat.py printr-un scurt raport de status.\nDacă nu e nimic de raportat (email=0, calendar nu are evenimente <2h, kb ok), răspunde doar cu HEARTBEAT_OK și oprește-te — nu trimite mesaj.\nDacă e ceva: raport scurt pe Discord #echo-work.",
|
||||
"allowed_tools": [],
|
||||
"enabled": true,
|
||||
"last_run": "2026-04-26T16:00:00.003767+00:00",
|
||||
"last_run": "2026-04-26T18:00:00.003601+00:00",
|
||||
"last_status": "ok",
|
||||
"next_run": "2026-04-26T18:00:00+00:00"
|
||||
"next_run": "2026-04-27T06:00:00+00:00"
|
||||
},
|
||||
{
|
||||
"name": "night-execute",
|
||||
|
||||
@@ -40,6 +40,7 @@ from handlers.files import FilesHandlers # noqa: E402
|
||||
from handlers.git import GitHandlers # noqa: E402
|
||||
from handlers.habits import HabitsHandlers # noqa: E402
|
||||
from handlers.pdf import PDFHandlers # noqa: E402
|
||||
from handlers.ralph import RalphHandlers # noqa: E402
|
||||
from handlers.workspace import WorkspaceHandlers # noqa: E402
|
||||
from handlers.youtube import YoutubeHandlers # noqa: E402
|
||||
|
||||
@@ -95,6 +96,7 @@ class TaskBoardHandler(
|
||||
PDFHandlers,
|
||||
YoutubeHandlers,
|
||||
WorkspaceHandlers,
|
||||
RalphHandlers,
|
||||
CronHandlers,
|
||||
SimpleHTTPRequestHandler,
|
||||
):
|
||||
@@ -155,6 +157,23 @@ class TaskBoardHandler(
|
||||
self.handle_eco_logs()
|
||||
elif self.path == '/api/eco/doctor':
|
||||
self.handle_eco_doctor()
|
||||
elif self.path == '/api/ralph/status' or self.path.startswith('/api/ralph/status?'):
|
||||
self.handle_ralph_status()
|
||||
elif self.path.startswith('/api/ralph/'):
|
||||
# /api/ralph/<slug>/log or /api/ralph/<slug>/prd
|
||||
parts = self.path.split('?', 1)[0].split('/')
|
||||
# parts: ['', 'api', 'ralph', '<slug>', '<action>']
|
||||
if len(parts) >= 5:
|
||||
slug = parts[3]
|
||||
action = parts[4]
|
||||
if action == 'log':
|
||||
self.handle_ralph_log(slug)
|
||||
elif action == 'prd':
|
||||
self.handle_ralph_prd(slug)
|
||||
else:
|
||||
self.send_error(404)
|
||||
else:
|
||||
self.send_error(404)
|
||||
elif self.path.startswith('/api/'):
|
||||
self.send_error(404)
|
||||
else:
|
||||
@@ -214,6 +233,13 @@ class TaskBoardHandler(
|
||||
self.handle_eco_git_commit()
|
||||
elif self.path == '/api/eco/restart-taskboard':
|
||||
self.handle_eco_restart_taskboard()
|
||||
elif self.path.startswith('/api/ralph/') and self.path.endswith('/stop'):
|
||||
parts = self.path.split('?', 1)[0].split('/')
|
||||
if len(parts) >= 5:
|
||||
slug = parts[3]
|
||||
self.handle_ralph_stop(slug)
|
||||
else:
|
||||
self.send_error(404)
|
||||
else:
|
||||
self.send_error(404)
|
||||
|
||||
|
||||
305
dashboard/handlers/ralph.py
Normal file
305
dashboard/handlers/ralph.py
Normal file
@@ -0,0 +1,305 @@
|
||||
"""Ralph live dashboard endpoints (W3).
|
||||
|
||||
Endpoints:
|
||||
GET /api/ralph/status — toate proiectele Ralph (cards data)
|
||||
GET /api/ralph/<slug>/log — tail progress.txt (default 100 lines)
|
||||
GET /api/ralph/<slug>/prd — full prd.json content
|
||||
POST /api/ralph/<slug>/stop — SIGTERM la Ralph PID
|
||||
|
||||
Polling: 5s din ralph.html (suficient pentru iter 8-15min Ralph).
|
||||
NU SSE/WebSocket pentru MVP.
|
||||
|
||||
Citește status din `~/workspace/<slug>/scripts/ralph/`:
|
||||
- prd.json → stories (passes/failed/blocked/retries)
|
||||
- progress.txt → log human-readable
|
||||
- logs/iteration-*.log → mtime ultimului iter
|
||||
- .ralph.pid → PID activ (verificat cu os.kill 0)
|
||||
|
||||
Reuse path constants din `dashboard/constants.py` (WORKSPACE_DIR).
|
||||
"""
|
||||
import json
|
||||
import os
|
||||
import signal
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from urllib.parse import unquote
|
||||
|
||||
import constants
|
||||
|
||||
|
||||
# Path Ralph per proiect (mereu în scripts/ralph/)
|
||||
def _ralph_dir(project_dir: Path) -> Path:
|
||||
return project_dir / "scripts" / "ralph"
|
||||
|
||||
|
||||
# Simplistic ETA estimate: average iteration time × remaining stories.
DEFAULT_ITER_MINUTES = 12  # midpoint of the 8-15 min range mentioned in the plan
|
||||
|
||||
|
||||
class RalphHandlers:
|
||||
"""Mixin pentru /api/ralph/* — Ralph live status + control."""
|
||||
|
||||
# ── helpers ────────────────────────────────────────────────
|
||||
def _ralph_validate_slug(self, slug: str):
|
||||
"""Validează slug-ul + returnează project_dir sau None."""
|
||||
if not slug or "/" in slug or ".." in slug:
|
||||
return None
|
||||
slug = unquote(slug)
|
||||
project_dir = constants.WORKSPACE_DIR / slug
|
||||
try:
|
||||
resolved = project_dir.resolve()
|
||||
workspace_resolved = constants.WORKSPACE_DIR.resolve()
|
||||
resolved.relative_to(workspace_resolved)
|
||||
except (ValueError, OSError):
|
||||
return None
|
||||
if not project_dir.exists() or not project_dir.is_dir():
|
||||
return None
|
||||
return project_dir
|
||||
|
||||
def _ralph_pid_alive(self, ralph_dir: Path):
|
||||
"""Întoarce (running: bool, pid: int|None)."""
|
||||
pid_file = ralph_dir / ".ralph.pid"
|
||||
if not pid_file.exists():
|
||||
return False, None
|
||||
try:
|
||||
pid = int(pid_file.read_text().strip())
|
||||
os.kill(pid, 0) # signal 0 = check existence
|
||||
return True, pid
|
||||
except (ValueError, ProcessLookupError, PermissionError, OSError):
|
||||
return False, None
|
||||
|
||||
def _ralph_eta_minutes(self, stories_remaining: int, last_iter_mtime: float | None) -> int | None:
|
||||
"""Estimează minute rămase — None dacă nu avem date."""
|
||||
if stories_remaining <= 0:
|
||||
return 0
|
||||
return stories_remaining * DEFAULT_ITER_MINUTES
|
||||
|
||||
def _ralph_summarize_project(self, project_dir: Path) -> dict | None:
|
||||
"""Construiește dict de status per proiect — None dacă nu e Ralph project."""
|
||||
ralph_dir = _ralph_dir(project_dir)
|
||||
prd_json = ralph_dir / "prd.json"
|
||||
if not prd_json.exists():
|
||||
return None
|
||||
|
||||
# Defensive parse — corupt prd.json nu trebuie să dărâme dashboard
|
||||
try:
|
||||
prd = json.loads(prd_json.read_text(encoding="utf-8"))
|
||||
except (json.JSONDecodeError, OSError):
|
||||
return {
|
||||
"slug": project_dir.name,
|
||||
"status": "error",
|
||||
"error": "prd.json invalid sau ilizibil",
|
||||
"running": False,
|
||||
"pid": None,
|
||||
"stories": [],
|
||||
"storiesTotal": 0,
|
||||
"storiesComplete": 0,
|
||||
"storiesFailed": 0,
|
||||
"storiesBlocked": 0,
|
||||
}
|
||||
|
||||
stories = prd.get("userStories", []) or []
|
||||
total = len(stories)
|
||||
complete = sum(1 for s in stories if s.get("passes"))
|
||||
failed = sum(1 for s in stories if s.get("failed"))
|
||||
blocked = sum(1 for s in stories if s.get("blocked"))
|
||||
remaining = total - complete - failed - blocked
|
||||
|
||||
running, pid = self._ralph_pid_alive(ralph_dir)
|
||||
|
||||
# Last iteration mtime (pentru "acum X")
|
||||
logs_dir = ralph_dir / "logs"
|
||||
last_iter_mtime = None
|
||||
last_iter_iso = None
|
||||
if logs_dir.exists():
|
||||
iter_logs = sorted(logs_dir.glob("iteration-*.log"), key=lambda f: f.stat().st_mtime, reverse=True)
|
||||
if iter_logs:
|
||||
last_iter_mtime = iter_logs[0].stat().st_mtime
|
||||
last_iter_iso = datetime.fromtimestamp(last_iter_mtime).isoformat()
|
||||
|
||||
# Status compus pentru UI cards
|
||||
if running:
|
||||
top_status = "running"
|
||||
elif failed > 0 and remaining == 0:
|
||||
top_status = "failed"
|
||||
elif complete == total and total > 0:
|
||||
top_status = "complete"
|
||||
elif blocked > 0 and running is False:
|
||||
top_status = "blocked"
|
||||
else:
|
||||
top_status = "idle"
|
||||
|
||||
# Current story (DAG-eligible cel mai mic priority)
|
||||
current_story = None
|
||||
if running:
|
||||
eligible = [
|
||||
s for s in stories
|
||||
if not s.get("passes") and not s.get("failed") and not s.get("blocked")
|
||||
]
|
||||
eligible.sort(key=lambda s: (s.get("priority", 999), s.get("id", "")))
|
||||
if eligible:
|
||||
current_story = {
|
||||
"id": eligible[0].get("id"),
|
||||
"title": eligible[0].get("title"),
|
||||
"tags": eligible[0].get("tags", []),
|
||||
"retries": eligible[0].get("retries", 0),
|
||||
}
|
||||
|
||||
return {
|
||||
"slug": project_dir.name,
|
||||
"status": top_status,
|
||||
"running": running,
|
||||
"pid": pid,
|
||||
"branchName": prd.get("branchName", ""),
|
||||
"storiesTotal": total,
|
||||
"storiesComplete": complete,
|
||||
"storiesFailed": failed,
|
||||
"storiesBlocked": blocked,
|
||||
"storiesRemaining": remaining,
|
||||
"currentStory": current_story,
|
||||
"lastIterAt": last_iter_iso,
|
||||
"etaMinutes": self._ralph_eta_minutes(remaining, last_iter_mtime),
|
||||
"stories": [
|
||||
{
|
||||
"id": s.get("id"),
|
||||
"title": s.get("title"),
|
||||
"passes": bool(s.get("passes")),
|
||||
"failed": bool(s.get("failed")),
|
||||
"blocked": bool(s.get("blocked")),
|
||||
"retries": int(s.get("retries", 0)),
|
||||
"tags": s.get("tags", []),
|
||||
"failureReason": s.get("failureReason", ""),
|
||||
}
|
||||
for s in stories
|
||||
],
|
||||
}
|
||||
|
||||
# ── /api/ralph/status (GET) ────────────────────────────────
|
||||
def handle_ralph_status(self):
|
||||
"""Întoarce status pentru toate proiectele Ralph din workspace."""
|
||||
try:
|
||||
projects = []
|
||||
if not constants.WORKSPACE_DIR.exists():
|
||||
self.send_json({"projects": [], "fetchedAt": datetime.now().isoformat()})
|
||||
return
|
||||
|
||||
for entry in sorted(constants.WORKSPACE_DIR.iterdir()):
|
||||
if not entry.is_dir() or entry.name.startswith("."):
|
||||
continue
|
||||
summary = self._ralph_summarize_project(entry)
|
||||
if summary is not None:
|
||||
projects.append(summary)
|
||||
|
||||
self.send_json({
|
||||
"projects": projects,
|
||||
"fetchedAt": datetime.now().isoformat(),
|
||||
"count": len(projects),
|
||||
})
|
||||
except Exception as exc:
|
||||
self.send_json({"error": str(exc)}, 500)
|
||||
|
||||
# ── /api/ralph/<slug>/log (GET) ────────────────────────────
|
||||
def handle_ralph_log(self, slug: str):
|
||||
"""Tail progress.txt pentru un slug. Default last 100 lines."""
|
||||
try:
|
||||
project_dir = self._ralph_validate_slug(slug)
|
||||
if not project_dir:
|
||||
self.send_json({"error": "Invalid project slug"}, 400)
|
||||
return
|
||||
|
||||
from urllib.parse import parse_qs, urlparse
|
||||
qs = parse_qs(urlparse(self.path).query)
|
||||
try:
|
||||
lines_n = min(int(qs.get("lines", ["100"])[0]), 1000)
|
||||
except ValueError:
|
||||
lines_n = 100
|
||||
|
||||
progress = _ralph_dir(project_dir) / "progress.txt"
|
||||
if not progress.exists():
|
||||
self.send_json({"slug": slug, "lines": [], "total": 0})
|
||||
return
|
||||
|
||||
try:
|
||||
content = progress.read_text(encoding="utf-8", errors="replace")
|
||||
except OSError as exc:
|
||||
self.send_json({"error": f"read failed: {exc}"}, 500)
|
||||
return
|
||||
|
||||
all_lines = content.splitlines()
|
||||
tail = all_lines[-lines_n:] if len(all_lines) > lines_n else all_lines
|
||||
self.send_json({
|
||||
"slug": slug,
|
||||
"lines": tail,
|
||||
"total": len(all_lines),
|
||||
})
|
||||
except Exception as exc:
|
||||
self.send_json({"error": str(exc)}, 500)
|
||||
|
||||
# ── /api/ralph/<slug>/prd (GET) ────────────────────────────
|
||||
def handle_ralph_prd(self, slug: str):
|
||||
"""Returnează full prd.json pentru un slug."""
|
||||
try:
|
||||
project_dir = self._ralph_validate_slug(slug)
|
||||
if not project_dir:
|
||||
self.send_json({"error": "Invalid project slug"}, 400)
|
||||
return
|
||||
|
||||
prd_json = _ralph_dir(project_dir) / "prd.json"
|
||||
if not prd_json.exists():
|
||||
self.send_json({"error": "prd.json not found"}, 404)
|
||||
return
|
||||
|
||||
try:
|
||||
data = json.loads(prd_json.read_text(encoding="utf-8"))
|
||||
except json.JSONDecodeError as exc:
|
||||
self.send_json({"error": f"prd.json invalid: {exc}"}, 500)
|
||||
return
|
||||
|
||||
self.send_json(data)
|
||||
except Exception as exc:
|
||||
self.send_json({"error": str(exc)}, 500)
|
||||
|
||||
# ── /api/ralph/<slug>/stop (POST) ──────────────────────────
|
||||
def handle_ralph_stop(self, slug: str):
|
||||
"""Trimite SIGTERM la Ralph PID. Verifică că PID-ul e în WORKSPACE_DIR."""
|
||||
try:
|
||||
project_dir = self._ralph_validate_slug(slug)
|
||||
if not project_dir:
|
||||
self.send_json({"success": False, "error": "Invalid project slug"}, 400)
|
||||
return
|
||||
|
||||
ralph_dir = _ralph_dir(project_dir)
|
||||
pid_file = ralph_dir / ".ralph.pid"
|
||||
if not pid_file.exists():
|
||||
self.send_json({"success": False, "error": "No PID file"}, 404)
|
||||
return
|
||||
|
||||
try:
|
||||
pid = int(pid_file.read_text().strip())
|
||||
except (ValueError, OSError) as exc:
|
||||
self.send_json({"success": False, "error": f"Invalid PID file: {exc}"}, 500)
|
||||
return
|
||||
|
||||
# Sandbox: verifică că procesul e în workspace (nu omoară random PID)
|
||||
try:
|
||||
proc_cwd = Path(f"/proc/{pid}/cwd").resolve()
|
||||
if not str(proc_cwd).startswith(str(constants.WORKSPACE_DIR)):
|
||||
self.send_json({"success": False, "error": "PID not in workspace"}, 403)
|
||||
return
|
||||
except (FileNotFoundError, PermissionError):
|
||||
# Procesul nu mai există — best-effort cleanup
|
||||
self.send_json({"success": True, "message": "Process already stopped"})
|
||||
return
|
||||
|
||||
try:
|
||||
os.killpg(os.getpgid(pid), signal.SIGTERM)
|
||||
except ProcessLookupError:
|
||||
self.send_json({"success": True, "message": "Process already stopped"})
|
||||
return
|
||||
except PermissionError:
|
||||
self.send_json({"success": False, "error": "Permission denied"}, 403)
|
||||
return
|
||||
|
||||
self.send_json({"success": True, "message": f"Ralph stopped (PID {pid})"})
|
||||
except Exception as exc:
|
||||
self.send_json({"success": False, "error": str(exc)}, 500)
|
||||
615
dashboard/ralph.html
Normal file
615
dashboard/ralph.html
Normal file
@@ -0,0 +1,615 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="ro">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<link rel="icon" type="image/svg+xml" href="/echo/favicon.svg">
|
||||
<title>Echo · Ralph</title>
|
||||
<link rel="stylesheet" href="/echo/common.css">
|
||||
<script src="https://unpkg.com/lucide@latest/dist/umd/lucide.min.js"></script>
|
||||
<script src="/echo/swipe-nav.js"></script>
|
||||
<style>
|
||||
/* ==========================================
|
||||
Ralph status extension tokens
|
||||
(existing common.css NU declară --status-*)
|
||||
========================================== */
|
||||
:root {
|
||||
--status-running: rgb(34, 197, 94); /* green */
|
||||
--status-blocked: rgb(245, 158, 11); /* amber */
|
||||
--status-failed: rgb(239, 68, 68); /* red */
|
||||
--status-complete: rgb(156, 163, 175); /* slate (done = neutral) */
|
||||
--status-idle: var(--text-muted);
|
||||
}
|
||||
|
||||
/* ==========================================
|
||||
Layout
|
||||
========================================== */
|
||||
.main {
|
||||
max-width: 1400px;
|
||||
margin: 0 auto;
|
||||
padding: var(--space-5);
|
||||
}
|
||||
|
||||
.page-header {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
gap: var(--space-3);
|
||||
margin-bottom: var(--space-5);
|
||||
}
|
||||
|
||||
.page-title {
|
||||
font-size: var(--text-xl);
|
||||
font-weight: 600;
|
||||
color: var(--text-primary);
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: var(--space-2);
|
||||
}
|
||||
|
||||
.page-subtitle {
|
||||
font-size: var(--text-sm);
|
||||
color: var(--text-muted);
|
||||
}
|
||||
|
||||
/* Live indicator pulse */
|
||||
.live-indicator {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: var(--space-2);
|
||||
font-size: var(--text-sm);
|
||||
color: var(--text-muted);
|
||||
padding: var(--space-1) var(--space-3);
|
||||
background: var(--bg-surface);
|
||||
border-radius: var(--radius-full);
|
||||
}
|
||||
|
||||
.live-dot {
|
||||
width: 8px;
|
||||
height: 8px;
|
||||
border-radius: 50%;
|
||||
background: var(--status-running);
|
||||
animation: pulse 2s ease-in-out infinite;
|
||||
}
|
||||
|
||||
@keyframes pulse {
|
||||
0%, 100% { opacity: 1; transform: scale(1); }
|
||||
50% { opacity: 0.5; transform: scale(1.2); }
|
||||
}
|
||||
|
||||
.last-fetch {
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-muted);
|
||||
}
|
||||
|
||||
/* ==========================================
|
||||
Cards grid
|
||||
========================================== */
|
||||
.ralph-grid {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(3, minmax(0, 1fr));
|
||||
gap: var(--space-4);
|
||||
}
|
||||
|
||||
@media (max-width: 1024px) {
|
||||
.ralph-grid { grid-template-columns: repeat(2, minmax(0, 1fr)); }
|
||||
}
|
||||
|
||||
@media (max-width: 640px) {
|
||||
.ralph-grid { grid-template-columns: 1fr; }
|
||||
}
|
||||
|
||||
.ralph-card {
|
||||
background: var(--bg-surface);
|
||||
border: 1px solid var(--border);
|
||||
border-radius: var(--radius-lg);
|
||||
padding: var(--space-4);
|
||||
min-height: 180px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: var(--space-3);
|
||||
transition: border-color var(--transition-fast);
|
||||
}
|
||||
|
||||
.ralph-card:hover {
|
||||
border-color: var(--border-focus);
|
||||
}
|
||||
|
||||
.ralph-card-head {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
gap: var(--space-2);
|
||||
}
|
||||
|
||||
.ralph-slug {
|
||||
font-size: var(--text-base);
|
||||
font-weight: 600;
|
||||
color: var(--text-primary);
|
||||
font-family: var(--font-mono);
|
||||
white-space: nowrap;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
}
|
||||
|
||||
.ralph-status {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: var(--space-1);
|
||||
padding: 2px 10px;
|
||||
font-size: var(--text-xs);
|
||||
font-weight: 600;
|
||||
border-radius: var(--radius-full);
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.04em;
|
||||
}
|
||||
|
||||
.ralph-status[data-status="running"] { background: rgba(34, 197, 94, 0.18); color: var(--status-running); }
|
||||
.ralph-status[data-status="blocked"] { background: rgba(245, 158, 11, 0.18); color: var(--status-blocked); }
|
||||
.ralph-status[data-status="failed"] { background: rgba(239, 68, 68, 0.18); color: var(--status-failed); }
|
||||
.ralph-status[data-status="complete"] { background: rgba(156, 163, 175, 0.18); color: var(--status-complete); }
|
||||
.ralph-status[data-status="idle"] { background: var(--bg-surface-active); color: var(--status-idle); }
|
||||
.ralph-status[data-status="error"] { background: rgba(239, 68, 68, 0.18); color: var(--status-failed); }
|
||||
|
||||
.ralph-status-dot {
|
||||
width: 6px;
|
||||
height: 6px;
|
||||
border-radius: 50%;
|
||||
background: currentColor;
|
||||
}
|
||||
|
||||
.ralph-card-body {
|
||||
flex: 1;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: var(--space-2);
|
||||
}
|
||||
|
||||
.ralph-current {
|
||||
font-size: var(--text-sm);
|
||||
color: var(--text-secondary);
|
||||
line-height: 1.4;
|
||||
}
|
||||
|
||||
.ralph-current-id {
|
||||
font-family: var(--font-mono);
|
||||
color: var(--text-primary);
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.ralph-tags {
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
gap: var(--space-1);
|
||||
}
|
||||
|
||||
.ralph-tag {
|
||||
font-size: var(--text-xs);
|
||||
padding: 1px 8px;
|
||||
background: var(--accent-subtle);
|
||||
color: var(--accent);
|
||||
border-radius: var(--radius-sm);
|
||||
font-family: var(--font-mono);
|
||||
}
|
||||
|
||||
/* Progress bar */
|
||||
.ralph-progress {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: var(--space-1);
|
||||
}
|
||||
|
||||
.ralph-progress-meta {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-muted);
|
||||
}
|
||||
|
||||
.ralph-progress-bar {
|
||||
height: 6px;
|
||||
background: var(--bg-surface-active);
|
||||
border-radius: var(--radius-full);
|
||||
overflow: hidden;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.ralph-progress-fill {
|
||||
height: 100%;
|
||||
background: var(--status-complete);
|
||||
transition: width var(--transition-base);
|
||||
}
|
||||
|
||||
.ralph-card[data-status="running"] .ralph-progress-fill { background: var(--status-running); }
|
||||
.ralph-card[data-status="failed"] .ralph-progress-fill { background: var(--status-failed); }
|
||||
.ralph-card[data-status="blocked"] .ralph-progress-fill { background: var(--status-blocked); }
|
||||
|
||||
.ralph-card-foot {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
gap: var(--space-2);
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-muted);
|
||||
}
|
||||
|
||||
.ralph-actions {
|
||||
display: flex;
|
||||
gap: var(--space-1);
|
||||
}
|
||||
|
||||
.ralph-icon-btn {
|
||||
background: transparent;
|
||||
border: 1px solid var(--border);
|
||||
color: var(--text-muted);
|
||||
border-radius: var(--radius-sm);
|
||||
cursor: pointer;
|
||||
padding: 6px;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
min-width: 32px;
|
||||
min-height: 32px;
|
||||
transition: all var(--transition-fast);
|
||||
}
|
||||
|
||||
.ralph-icon-btn:hover {
|
||||
color: var(--text-primary);
|
||||
background: var(--bg-surface-hover);
|
||||
}
|
||||
|
||||
.ralph-icon-btn.danger {
|
||||
color: var(--status-failed);
|
||||
border-color: rgba(239, 68, 68, 0.4);
|
||||
}
|
||||
|
||||
.ralph-icon-btn.danger:hover {
|
||||
background: rgba(239, 68, 68, 0.12);
|
||||
}
|
||||
|
||||
.ralph-icon-btn svg {
|
||||
width: 14px;
|
||||
height: 14px;
|
||||
}
|
||||
|
||||
@media (max-width: 640px) {
|
||||
.ralph-icon-btn {
|
||||
min-width: 44px;
|
||||
min-height: 44px;
|
||||
}
|
||||
.ralph-icon-btn svg {
|
||||
width: 18px;
|
||||
height: 18px;
|
||||
}
|
||||
}
|
||||
|
||||
/* Empty / loading / error states */
|
||||
.ralph-empty,
|
||||
.ralph-loading,
|
||||
.ralph-error {
|
||||
text-align: center;
|
||||
padding: var(--space-10) var(--space-5);
|
||||
color: var(--text-muted);
|
||||
}
|
||||
|
||||
.ralph-empty svg,
|
||||
.ralph-loading svg,
|
||||
.ralph-error svg {
|
||||
width: 32px;
|
||||
height: 32px;
|
||||
margin-bottom: var(--space-3);
|
||||
opacity: 0.6;
|
||||
}
|
||||
|
||||
.ralph-empty-title {
|
||||
font-size: var(--text-base);
|
||||
color: var(--text-secondary);
|
||||
margin-bottom: var(--space-1);
|
||||
}
|
||||
|
||||
/* ==========================================
|
||||
Drawer (log + PRD viewer)
|
||||
========================================== */
|
||||
.ralph-drawer {
|
||||
position: fixed;
|
||||
inset: 0;
|
||||
background: rgba(0, 0, 0, 0.55);
|
||||
display: none;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
z-index: 200;
|
||||
padding: var(--space-4);
|
||||
}
|
||||
|
||||
.ralph-drawer[data-open="true"] {
|
||||
display: flex;
|
||||
}
|
||||
|
||||
.ralph-drawer-content {
|
||||
background: var(--bg-base);
|
||||
border: 1px solid var(--border);
|
||||
border-radius: var(--radius-lg);
|
||||
max-width: 900px;
|
||||
width: 100%;
|
||||
max-height: 85vh;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.ralph-drawer-head {
|
||||
padding: var(--space-3) var(--space-4);
|
||||
border-bottom: 1px solid var(--border);
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
gap: var(--space-3);
|
||||
}
|
||||
|
||||
.ralph-drawer-title {
|
||||
font-size: var(--text-base);
|
||||
font-weight: 600;
|
||||
color: var(--text-primary);
|
||||
font-family: var(--font-mono);
|
||||
}
|
||||
|
||||
.ralph-drawer-body {
|
||||
flex: 1;
|
||||
overflow: auto;
|
||||
padding: var(--space-4);
|
||||
}
|
||||
|
||||
.ralph-drawer-pre {
|
||||
font-family: var(--font-mono);
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-secondary);
|
||||
white-space: pre-wrap;
|
||||
word-break: break-word;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<!--NAV-->
|
||||
|
||||
<main class="main">
|
||||
<header class="page-header">
|
||||
<div>
|
||||
<div class="page-title">
|
||||
<i data-lucide="bot" aria-hidden="true"></i>
|
||||
Echo · Ralph
|
||||
</div>
|
||||
<div class="page-subtitle">Live status pe proiectele autonome (polling 5s)</div>
|
||||
</div>
|
||||
<div class="live-indicator" aria-live="polite">
|
||||
<span class="live-dot" aria-hidden="true"></span>
|
||||
<span id="liveLabel">Live</span>
|
||||
<span class="last-fetch" id="lastFetch"></span>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
<section id="ralphContent" aria-live="polite">
|
||||
<div class="ralph-loading">
|
||||
<i data-lucide="loader" aria-hidden="true"></i>
|
||||
<div>Se încarcă proiectele Ralph...</div>
|
||||
</div>
|
||||
</section>
|
||||
</main>
|
||||
|
||||
<!-- Drawer pentru log / PRD viewer -->
|
||||
<div class="ralph-drawer" id="ralphDrawer" data-open="false" role="dialog" aria-modal="true" aria-labelledby="drawerTitle">
|
||||
<div class="ralph-drawer-content">
|
||||
<div class="ralph-drawer-head">
|
||||
<div class="ralph-drawer-title" id="drawerTitle">—</div>
|
||||
<button type="button" class="ralph-icon-btn" id="drawerClose" aria-label="Închide drawer">
|
||||
<i data-lucide="x" aria-hidden="true"></i>
|
||||
</button>
|
||||
</div>
|
||||
<div class="ralph-drawer-body">
|
||||
<pre class="ralph-drawer-pre" id="drawerBody"></pre>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
(function () {
|
||||
const POLL_MS = 5000;
|
||||
const contentEl = document.getElementById('ralphContent');
|
||||
const lastFetchEl = document.getElementById('lastFetch');
|
||||
const liveLabel = document.getElementById('liveLabel');
|
||||
const drawer = document.getElementById('ralphDrawer');
|
||||
const drawerTitle = document.getElementById('drawerTitle');
|
||||
const drawerBody = document.getElementById('drawerBody');
|
||||
const drawerClose = document.getElementById('drawerClose');
|
||||
|
||||
// Render an ISO timestamp as a relative "acum X" (ago) label; '—' when absent/invalid.
function fmtAgo(iso) {
  if (!iso) return '—';
  const ts = new Date(iso).getTime();
  if (Number.isNaN(ts)) return '—';
  const sec = Math.floor(Math.max(0, Date.now() - ts) / 1000);
  if (sec < 60) return `acum ${sec}s`;
  const min = Math.floor(sec / 60);
  if (min < 60) return `acum ${min}m`;
  const hr = Math.floor(min / 60);
  if (hr < 24) return `acum ${hr}h`;
  return `acum ${Math.floor(hr / 24)}z`;
}
|
||||
|
||||
// Escape the characters HTML treats specially so values interpolated into
// innerHTML templates cannot inject markup (XSS). As previously written the
// replacements mapped each character to itself (a no-op), leaving every
// ${escapeHtml(...)} site injectable; real entities restored here.
// NOTE: '&' must be replaced first so the other entities are not double-escaped.
function escapeHtml(s) {
  return String(s == null ? '' : s)
    .replace(/&/g, '&amp;')
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;');
}
|
||||
|
||||
// Build the HTML for one project card from a /api/ralph/status entry.
// All interpolated server-supplied strings go through escapeHtml; numeric
// counters are derived locally so they are safe to inline.
function renderCard(p) {
  // Story counters — default to 0 so the arithmetic below is safe.
  const total = p.storiesTotal || 0;
  const done = p.storiesComplete || 0;
  const failed = p.storiesFailed || 0;
  const blocked = p.storiesBlocked || 0;
  // Progress counts every story in a terminal state (done/failed/blocked).
  const pct = total > 0 ? Math.round(((done + failed + blocked) / total) * 100) : 0;

  // Current-story line: id + title (+ retry badge + tags) when one is
  // reported; otherwise a completion or idle message.
  const current = p.currentStory
    ? `<div class="ralph-current"><span class="ralph-current-id">${escapeHtml(p.currentStory.id)}</span> · ${escapeHtml(p.currentStory.title || '')} ` +
      (p.currentStory.retries ? `<span title="retries">(${p.currentStory.retries}/3)</span>` : '') + `</div>` +
      (p.currentStory.tags && p.currentStory.tags.length
        ? `<div class="ralph-tags">${p.currentStory.tags.map(t => `<span class="ralph-tag">${escapeHtml(t)}</span>`).join('')}</div>`
        : '')
    : (p.status === 'complete'
      ? `<div class="ralph-current">Toate stories complete (${done}/${total}).</div>`
      : `<div class="ralph-current" style="color:var(--text-muted)">Nu rulează acum.</div>`);

  // ETA label only shown while the project is actually running.
  const eta = (p.etaMinutes != null && p.status === 'running')
    ? `~${p.etaMinutes}min`
    : '';

  // Stop button only rendered when a live Ralph PID exists.
  const stopBtn = p.running
    ? `<button type="button" class="ralph-icon-btn danger" data-action="stop" data-slug="${escapeHtml(p.slug)}" aria-label="Oprește Ralph">
        <i data-lucide="square" aria-hidden="true"></i>
      </button>`
    : '';

  // data-action / data-slug on the buttons are picked up by the delegated
  // click handler on the cards container.
  return `
    <article class="ralph-card" data-status="${escapeHtml(p.status)}">
      <header class="ralph-card-head">
        <div class="ralph-slug" title="${escapeHtml(p.slug)}">${escapeHtml(p.slug)}</div>
        <span class="ralph-status" data-status="${escapeHtml(p.status)}" aria-label="Status: ${escapeHtml(p.status)}">
          <span class="ralph-status-dot" aria-hidden="true"></span>${escapeHtml(p.status)}
        </span>
      </header>
      <div class="ralph-card-body">
        ${current}
        <div class="ralph-progress">
          <div class="ralph-progress-meta">
            <span>${done}/${total} done${failed ? ` · ${failed} failed` : ''}${blocked ? ` · ${blocked} blocked` : ''}</span>
            <span>${eta}</span>
          </div>
          <div class="ralph-progress-bar" role="progressbar" aria-valuenow="${pct}" aria-valuemin="0" aria-valuemax="100">
            <div class="ralph-progress-fill" style="width:${pct}%"></div>
          </div>
        </div>
      </div>
      <footer class="ralph-card-foot">
        <span title="Ultima iterație">${fmtAgo(p.lastIterAt)}</span>
        <div class="ralph-actions">
          <button type="button" class="ralph-icon-btn" data-action="log" data-slug="${escapeHtml(p.slug)}" aria-label="Vezi log">
            <i data-lucide="terminal" aria-hidden="true"></i>
          </button>
          <button type="button" class="ralph-icon-btn" data-action="prd" data-slug="${escapeHtml(p.slug)}" aria-label="Vezi PRD">
            <i data-lucide="file-text" aria-hidden="true"></i>
          </button>
          ${stopBtn}
        </div>
      </footer>
    </article>`;
}
|
||||
|
||||
// Empty state: no approved Ralph projects in the workspace.
// The placeholder is written as &lt;slug&gt; so "<slug>" renders as literal
// text — previously the raw <slug> was parsed as an (unknown) HTML tag and
// never displayed.
function renderEmpty() {
  return `
    <div class="ralph-empty">
      <i data-lucide="inbox" aria-hidden="true"></i>
      <div class="ralph-empty-title">Niciun proiect aprobat.</div>
      <div>Aprobă ceva pe Discord/Telegram cu <code>/a &lt;slug&gt;</code>.</div>
    </div>`;
}
|
||||
|
||||
// Error state shown when the status endpoint cannot be reached.
function renderError(msg) {
  const safeMsg = escapeHtml(msg);
  return `
    <div class="ralph-error">
      <i data-lucide="alert-triangle" aria-hidden="true"></i>
      <div>Cannot reach Echo Core: ${safeMsg}</div>
    </div>`;
}
|
||||
|
||||
// Poll /api/ralph/status and re-render the cards grid; flip the live
// indicator to "Offline" on any failure.
async function fetchStatus() {
  try {
    const res = await fetch('/api/ralph/status', { cache: 'no-store' });
    if (!res.ok) throw new Error('HTTP ' + res.status);
    const data = await res.json();
    const projects = data.projects || [];
    contentEl.innerHTML = projects.length
      ? `<div class="ralph-grid">${projects.map(renderCard).join('')}</div>`
      : renderEmpty();
    lastFetchEl.textContent = '· ' + fmtAgo(data.fetchedAt);
    liveLabel.textContent = 'Live';
    if (window.lucide) lucide.createIcons();
  } catch (err) {
    contentEl.innerHTML = renderError(err.message || String(err));
    liveLabel.textContent = 'Offline';
    if (window.lucide) lucide.createIcons();
  }
}
|
||||
|
||||
// Open the drawer with the tail of a project's progress.txt (200 lines).
async function openLog(slug) {
  drawerTitle.textContent = `${slug} · progress.txt`;
  drawerBody.textContent = 'Se încarcă...';
  drawer.dataset.open = 'true';
  try {
    const resp = await fetch(`/api/ralph/${encodeURIComponent(slug)}/log?lines=200`);
    const payload = await resp.json();
    const logLines = payload.lines || [];
    drawerBody.textContent = logLines.join('\n');
  } catch (err) {
    drawerBody.textContent = `Error: ${err.message || err}`;
  }
}
|
||||
|
||||
async function openPrd(slug) {
  // Open the side drawer and pretty-print the project's prd.json.
  drawerTitle.textContent = `${slug} · prd.json`;
  drawerBody.textContent = 'Se încarcă...';
  drawer.dataset.open = 'true';
  try {
    const res = await fetch(`/api/ralph/${encodeURIComponent(slug)}/prd`);
    const prd = await res.json();
    drawerBody.textContent = JSON.stringify(prd, null, 2);
  } catch (err) {
    drawerBody.textContent = `Error: ${err.message || err}`;
  }
}
|
||||
|
||||
async function stopRalph(slug) {
  // Confirm with the user, then POST a stop request and refresh the list.
  if (!confirm(`Oprești Ralph pe ${slug}?`)) return;
  try {
    const res = await fetch(`/api/ralph/${encodeURIComponent(slug)}/stop`, { method: 'POST' });
    const data = await res.json();
    if (data.success) {
      fetchStatus();
    } else {
      alert('Eșec: ' + (data.error || 'unknown'));
    }
  } catch (err) {
    alert('Eroare: ' + (err.message || err));
  }
}
|
||||
|
||||
// Delegate card-button clicks (log / prd / stop) to one listener on the grid.
contentEl.addEventListener('click', (e) => {
  const btn = e.target.closest('[data-action]');
  if (!btn) return;
  const { slug, action } = btn.dataset;
  switch (action) {
    case 'log': openLog(slug); break;
    case 'prd': openPrd(slug); break;
    case 'stop': stopRalph(slug); break;
  }
});

// Drawer dismissal: close button, backdrop click, or Escape key.
const closeDrawer = () => { drawer.dataset.open = 'false'; };
drawerClose.addEventListener('click', closeDrawer);
drawer.addEventListener('click', (e) => {
  if (e.target === drawer) closeDrawer();
});
document.addEventListener('keydown', (e) => {
  if (e.key === 'Escape') closeDrawer();
});

// Boot + poll
fetchStatus();
setInterval(fetchStatus, POLL_MS);
if (window.lucide) lucide.createIcons();
|
||||
})();
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
31
memory/kb/coaching/playlist-transe-meditatii.md
Normal file
31
memory/kb/coaching/playlist-transe-meditatii.md
Normal file
@@ -0,0 +1,31 @@
|
||||
# Playlist Transe Ghidate & Meditații
|
||||
|
||||
**Sursa:** https://www.youtube.com/playlist?list=PL7shaFF3OaeUG2wIyHa5akgKn3KHVYPHS
|
||||
**Salvat:** 2026-04-26
|
||||
**Tags:** @growth @health @meditatie
|
||||
|
||||
---
|
||||
|
||||
Playlist cu muzică instrumentală potrivită pentru transe ghidate sau meditații.
|
||||
|
||||
## Melodii
|
||||
|
||||
1. Brides - Stamatis Spanoudakis
|
||||
2. Brides - Stamatis Spanoudakis
|
||||
3. Secret Garden - Song from a Secret Garden
|
||||
4. Secret Garden - Adagio
|
||||
5. HAUSER - Serenade
|
||||
6. Joe Hisaishi - Merry-Go-Round of Life (Howl's Moving Castle)
|
||||
7. Ludovico Einaudi - Experience
|
||||
8. Ludovico Einaudi - Nuvole Bianche
|
||||
9. Two Steps From Hell - Heart of Courage (Extended)
|
||||
10. Two Steps From Hell - Heart of Courage (1 hour)
|
||||
11. Thomas Bergersen - Empire of Angels
|
||||
12. Gabriel's Oboe
|
||||
13. Vangelis - 1492: Conquest of Paradise (Main Theme)
|
||||
14. Yann Tiersen - Comptine d'un autre été (Amélie)
|
||||
15. Yann Tiersen - Comptine d'un autre été (1 hour)
|
||||
|
||||
---
|
||||
|
||||
*Instrumentale clasice și cinematic — Einaudi, Vangelis, Hisaishi, Tiersen, Secret Garden.*
|
||||
@@ -1,5 +1,24 @@
|
||||
{
|
||||
"notes": [
|
||||
{
|
||||
"file": "notes-data/coaching/playlist-transe-meditatii.md",
|
||||
"title": "Playlist Transe Ghidate & Meditații",
|
||||
"date": "2026-04-26",
|
||||
"tags": [],
|
||||
"domains": [
|
||||
"growth",
|
||||
"health"
|
||||
],
|
||||
"types": [
|
||||
"meditatie",
|
||||
"coaching"
|
||||
],
|
||||
"category": "coaching",
|
||||
"project": null,
|
||||
"subdir": null,
|
||||
"video": "",
|
||||
"tldr": "*Instrumentale clasice și cinematic — Einaudi, Vangelis, Hisaishi, Tiersen, Secret Garden.*"
|
||||
},
|
||||
{
|
||||
"file": "notes-data/tools/infrastructure.md",
|
||||
"title": "Infrastructură (Proxmox + Docker)",
|
||||
@@ -8520,8 +8539,8 @@
|
||||
"title": "Proiect: Vending Master - Integrare Website → ROA",
|
||||
"date": "2026-01-30",
|
||||
"tags": [
|
||||
"vending-master",
|
||||
"integrare"
|
||||
"integrare",
|
||||
"vending-master"
|
||||
],
|
||||
"domains": [
|
||||
"work"
|
||||
@@ -9001,17 +9020,17 @@
|
||||
}
|
||||
],
|
||||
"stats": {
|
||||
"total": 517,
|
||||
"total": 518,
|
||||
"by_domain": {
|
||||
"work": 163,
|
||||
"health": 97,
|
||||
"growth": 233,
|
||||
"health": 98,
|
||||
"growth": 234,
|
||||
"sprijin": 39,
|
||||
"scout": 8
|
||||
},
|
||||
"by_category": {
|
||||
"articole": 1,
|
||||
"coaching": 49,
|
||||
"coaching": 50,
|
||||
"conversations": 0,
|
||||
"emails": 18,
|
||||
"exercitii": 4,
|
||||
|
||||
67
prompts/planning_agent.md
Normal file
67
prompts/planning_agent.md
Normal file
@@ -0,0 +1,67 @@
|
||||
# Echo planning agent — system prompt
|
||||
|
||||
Ești **Echo**, asistentul lui Marius, în rol de **agent de planning conversational**. Marius
|
||||
te-a chemat să porți cu el o conversație multi-fază despre un feature, până se naște un plan
|
||||
implementabil. La final, tu (sau o fază ulterioară) scrii `final-plan.md` în repo-ul țintă,
|
||||
iar Ralph îl execută noaptea pe stories.
|
||||
|
||||
## Context curent
|
||||
|
||||
- **Slug proiect:** `{slug}`
|
||||
- **Descriere inițială:** {description}
|
||||
- **Faza curentă:** `{phase}`
|
||||
- **Repo țintă (CWD):** `~/workspace/{slug}/`
|
||||
- **Artefacte gstack anterioare:** `~/.gstack/projects/{slug}/` (citește înainte să întrebi
|
||||
lucruri pe care alte faze le-au lămurit deja)
|
||||
- **Output final:** `~/workspace/{slug}/scripts/ralph/final-plan.md`
|
||||
|
||||
## Voce / ton
|
||||
|
||||
Cald + colaborativ, ca un coleg cu care construiești ceva. „Hai să...", „ce-ți dorești", „noi"
|
||||
— niciodată „Please provide", „Submit", „Approve". Răspunde în limba lui Marius (română default;
|
||||
dacă scrie EN, mergi EN). Concis: 3-6 propoziții per turn, nu eseuri.
|
||||
|
||||
## Cum coordonezi cu skill-urile gstack
|
||||
|
||||
Faza curentă e numele unui skill gstack (`/office-hours`, `/plan-ceo-review`,
|
||||
`/plan-eng-review`, `/plan-design-review`). Când primești prima invocare a fazei, urmează skill-ul
|
||||
ca de obicei — el îți dă structura. Nu re-rula skill-ul în interiorul aceleiași sesiuni decât
|
||||
dacă Marius cere explicit.
|
||||
|
||||
Fiecare fază rulează într-un **subprocess Claude separat** (fresh `claude -p`). Sesiunea
|
||||
precedentă a salvat un artifact pe disc (`~/.gstack/projects/{slug}/...`); citește-l ca să nu îl
|
||||
întrebi pe Marius lucruri lămurite deja.
|
||||
|
||||
## Reguli de output
|
||||
|
||||
1. **Întrebări pentru Marius** — pune-i 1–3 întrebări la rând, nu 10. AskUserQuestion gstack se
|
||||
serializează ca text simplu — nu te bloca în tool-use când ești în `-p` mode.
|
||||
2. **Marker de progres** — când consideri faza completă în mintea ta, închide turnul cu o
|
||||
linie pe ultimul rând:
|
||||
```
|
||||
PHASE_STATUS: ready_to_advance
|
||||
```
|
||||
Echo (orchestratorul) o citește și îi prezintă lui Marius butonul „Continuă faza".
|
||||
Dacă mai ai nevoie de input, închide cu `PHASE_STATUS: needs_input`.
|
||||
3. **Artifact pe disc** — la sfârșitul fazei tale, scrie sau actualizează artifactul în
|
||||
`~/.gstack/projects/{slug}/{user}-{phase}-...md` conform convenției skill-ului. Nu inventa
|
||||
path-uri noi — folosește exact ce skill-ul gstack creează implicit.
|
||||
4. **Final plan** — în ultima fază (sau când Marius spune explicit „gata"), scrie
|
||||
`~/workspace/{slug}/scripts/ralph/final-plan.md` cu secțiunile:
|
||||
- Context (de ce această schimbare)
|
||||
- Architecture overview
|
||||
- User stories preliminare (Ralph PRD generator le va structura ulterior)
|
||||
- Implementation hints
|
||||
- Verification approach (smoke tests, ce gates relevante)
|
||||
5. **Niciodată nu rula** comenzi destructive fără confirmare. Nu modifica fișiere în afara
|
||||
`~/workspace/{slug}/` și `~/.gstack/projects/{slug}/`.
|
||||
|
||||
## Granițe
|
||||
|
||||
- Nu ai voie să atingi `src/router.py`, `src/claude_session.py`, `src/planning_session.py`,
|
||||
`src/planning_orchestrator.py` sau alte fișiere core din `echo-core` — chiar dacă Marius îți
|
||||
cere ceva care ar implica asta, întoarce-te la el cu „asta e core Echo, fac eu pe master".
|
||||
- Nu inventa decizii arhitecturale fără să ai semnal de la Marius. Dacă te blochează lipsă de
|
||||
context, întreabă-l pe el direct.
|
||||
- Cost / rate-limit: Marius e pe subscription Anthropic, deci ignoră US$. Dar ține-te scurt —
|
||||
fiecare turn consumă rate-limit budget.
|
||||
@@ -23,6 +23,16 @@ from src.router import (
|
||||
_ralph_status,
|
||||
_ralph_stop,
|
||||
_load_approved_tasks,
|
||||
planning_advance,
|
||||
planning_approve,
|
||||
planning_cancel,
|
||||
start_planning_session,
|
||||
)
|
||||
from src.adapters.discord_views import (
|
||||
RalphRootView,
|
||||
PlanningActiveView,
|
||||
PlanningFinalView,
|
||||
_split_chunks,
|
||||
)
|
||||
|
||||
logger = logging.getLogger("echo-core.discord")
|
||||
@@ -933,8 +943,28 @@ def create_bot(config: Config) -> discord.Client:
|
||||
) -> list[app_commands.Choice[str]]:
|
||||
return await _autocomplete_by_status(interaction, current, ("running", "approved"))
|
||||
|
||||
async def _ralph_autocomplete_workspace(
|
||||
interaction: discord.Interaction, current: str
|
||||
) -> list[app_commands.Choice[str]]:
|
||||
from pathlib import Path
|
||||
ws = Path("/home/moltbot/workspace")
|
||||
if not ws.exists():
|
||||
return []
|
||||
current_low = (current or "").lower()
|
||||
choices: list[app_commands.Choice[str]] = []
|
||||
for p in sorted(ws.iterdir()):
|
||||
if not p.is_dir() or p.name.startswith("."):
|
||||
continue
|
||||
if current_low and current_low not in p.name.lower():
|
||||
continue
|
||||
choices.append(app_commands.Choice(name=p.name, value=p.name))
|
||||
if len(choices) >= 25:
|
||||
break
|
||||
return choices
|
||||
|
||||
@tree.command(name="p", description="Propose new Ralph project")
|
||||
@app_commands.describe(slug="Project slug (e.g. game-library)", description="Short description of what to do")
|
||||
@app_commands.autocomplete(slug=_ralph_autocomplete_workspace)
|
||||
async def ralph_p(
|
||||
interaction: discord.Interaction, slug: str, description: str
|
||||
) -> None:
|
||||
@@ -949,9 +979,12 @@ def create_bot(config: Config) -> discord.Client:
|
||||
slugs = [slug] if slug else []
|
||||
await interaction.response.send_message(_ralph_approve(slugs))
|
||||
|
||||
@tree.command(name="l", description="List Ralph projects status")
|
||||
@tree.command(name="l", description="List Ralph projects (interactive)")
|
||||
async def ralph_l(interaction: discord.Interaction) -> None:
|
||||
await interaction.response.send_message(_ralph_status())
|
||||
view = RalphRootView()
|
||||
await interaction.response.send_message(
|
||||
view.render_summary(), view=view, ephemeral=True
|
||||
)
|
||||
|
||||
@tree.command(name="k", description="Stop a running Ralph project")
|
||||
@app_commands.describe(slug="Project slug to stop")
|
||||
@@ -961,6 +994,67 @@ def create_bot(config: Config) -> discord.Client:
|
||||
) -> None:
|
||||
await interaction.response.send_message(_ralph_stop(slug))
|
||||
|
||||
# ---- Planning agent (W2) ---------------------------------------------
|
||||
|
||||
@tree.command(name="plan", description="Pornește o sesiune de planning conversational pentru un proiect")
|
||||
@app_commands.describe(
|
||||
slug="Project slug (folosește /p ca să-l adaugi întâi)",
|
||||
description="Descriere opțională (default: cea din approved-tasks.json)",
|
||||
)
|
||||
@app_commands.autocomplete(slug=_ralph_autocomplete_pending)
|
||||
async def plan_cmd(
|
||||
interaction: discord.Interaction,
|
||||
slug: str,
|
||||
description: str | None = None,
|
||||
) -> None:
|
||||
await interaction.response.defer(ephemeral=True)
|
||||
# Resolve description: explicit param wins, else look up in approved-tasks.
|
||||
desc = (description or "").strip()
|
||||
if not desc:
|
||||
try:
|
||||
data = _load_approved_tasks()
|
||||
for p in data.get("projects", []):
|
||||
if p.get("name", "").lower() == slug.lower():
|
||||
desc = p.get("description") or ""
|
||||
break
|
||||
except Exception:
|
||||
logger.exception("approved-tasks lookup failed")
|
||||
if not desc:
|
||||
await interaction.followup.send(
|
||||
f"Nu am descriere pentru `{slug}`. Adaugă cu `/p {slug} <descriere>` "
|
||||
"sau pasează `description` la `/plan`.",
|
||||
ephemeral=True,
|
||||
)
|
||||
return
|
||||
|
||||
channel_id = str(interaction.channel_id)
|
||||
await interaction.followup.send(
|
||||
f"🧠 Pornesc planning pentru `{slug}`… (durează ~60s)", ephemeral=True
|
||||
)
|
||||
try:
|
||||
first = await asyncio.to_thread(
|
||||
start_planning_session, slug, desc, channel_id, "discord",
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception("start_planning_session failed for %s", slug)
|
||||
await interaction.followup.send(f"Planning blocat: {e}", ephemeral=True)
|
||||
return
|
||||
for chunk in _split_chunks(first):
|
||||
await interaction.followup.send(chunk, ephemeral=True)
|
||||
await interaction.followup.send(
|
||||
"Răspunde aici. Apasă **Continuă faza** când ești gata să trec la următoarea.",
|
||||
view=PlanningActiveView(),
|
||||
ephemeral=True,
|
||||
)
|
||||
|
||||
@tree.command(name="cancel", description="Anulează sesiunea de planning curentă")
|
||||
async def cancel_planning_cmd(interaction: discord.Interaction) -> None:
|
||||
await interaction.response.defer(ephemeral=True)
|
||||
text = await asyncio.to_thread(
|
||||
planning_cancel, str(interaction.channel_id), "discord",
|
||||
)
|
||||
await interaction.followup.send(text, ephemeral=True)
|
||||
|
||||
# --- Events ---
|
||||
|
||||
@client.event
|
||||
@@ -1005,6 +1099,7 @@ def create_bot(config: Config) -> discord.Client:
|
||||
response, _is_cmd = await asyncio.to_thread(
|
||||
route_message, channel_id, user_id, text,
|
||||
on_text=on_text,
|
||||
adapter_name="discord",
|
||||
)
|
||||
|
||||
# Only send the final combined response if no intermediates
|
||||
|
||||
393
src/adapters/discord_views.py
Normal file
393
src/adapters/discord_views.py
Normal file
@@ -0,0 +1,393 @@
|
||||
"""Discord interactive Views for Ralph — root list + per-project actions + propose modal.
|
||||
|
||||
Critical pattern: every button callback that does I/O MUST call
|
||||
`await interaction.response.defer(ephemeral=True)` FIRST, then use
|
||||
`interaction.followup.send(...)`. Otherwise Discord's 3s interaction
|
||||
timeout kicks in and the user sees "Interaction failed".
|
||||
"""
|
||||
|
||||
from __future__ import annotations

import asyncio
import json
import logging
from pathlib import Path

import discord

from src.router import (
    _load_approved_tasks,
    _ralph_approve,
    _ralph_propose,
    _ralph_status,
    _ralph_stop,
    planning_advance,
    planning_approve,
    planning_cancel,
    start_planning_session,
)
from src.planning_session import is_in_planning
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
WORKSPACE_DIR = Path("/home/moltbot/workspace")
|
||||
|
||||
# Status → emoji prefix for project labels
|
||||
_STATUS_EMOJI = {
|
||||
"pending": "📋",
|
||||
"approved": "⏳",
|
||||
"running": "🟢",
|
||||
"complete": "✅",
|
||||
"failed": "❌",
|
||||
"stopped": "⏹",
|
||||
}
|
||||
|
||||
VIEW_TIMEOUT = 600 # 10 min — matches ralph_flow TTL
|
||||
|
||||
|
||||
def _project_label(name: str, status: str | None) -> str:
|
||||
"""Return a short button label like '🟢 roa2web' (max 80 chars per Discord)."""
|
||||
emoji = _STATUS_EMOJI.get(status or "", "·")
|
||||
label = f"{emoji} {name}"
|
||||
return label[:80]
|
||||
|
||||
|
||||
def _list_workspace_projects() -> list[str]:
    """Return workspace folder names sorted, skipping hidden directories."""
    if not WORKSPACE_DIR.exists():
        return []
    names = [
        entry.name
        for entry in WORKSPACE_DIR.iterdir()
        if entry.is_dir() and not entry.name.startswith(".")
    ]
    return sorted(names)
|
||||
|
||||
|
||||
def _project_status(name: str) -> str | None:
    """Look up a project's status in approved-tasks.json; None when unknown."""
    try:
        data = _load_approved_tasks()
    except Exception:
        # Best-effort: a broken/missing tasks file just means "no status".
        return None
    wanted = name.lower()
    return next(
        (
            entry.get("status")
            for entry in data.get("projects", [])
            if entry.get("name", "").lower() == wanted
        ),
        None,
    )
|
||||
|
||||
|
||||
def _read_prd(name: str) -> dict | None:
    """Parse scripts/ralph/prd.json for *name*; None when missing or invalid."""
    prd_file = WORKSPACE_DIR / name / "scripts" / "ralph" / "prd.json"
    try:
        raw = prd_file.read_text(encoding="utf-8")
    except OSError:
        # Covers both "file does not exist" and unreadable files.
        return None
    try:
        return json.loads(raw)
    except ValueError:
        return None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# RalphProposeModal — text input for new feature description
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class RalphProposeModal(discord.ui.Modal, title="Propune feature Ralph"):
    """Modal asking the user for a feature description for a given project."""

    description: discord.ui.TextInput = discord.ui.TextInput(
        label="Descriere",
        placeholder="Ce trebuie făcut? (1-3 propoziții)",
        style=discord.TextStyle.paragraph,
        required=True,
        max_length=1000,
    )

    def __init__(self, slug: str) -> None:
        super().__init__(timeout=VIEW_TIMEOUT)
        self.slug = slug
        # Discord caps modal titles at 45 characters.
        self.title = f"Propune feature: {slug}"[:45]

    async def on_submit(self, interaction: discord.Interaction) -> None:
        """Forward the submitted description to the Ralph propose pipeline."""
        text = str(self.description.value).strip()
        try:
            outcome = _ralph_propose(self.slug, text)
        except Exception as e:
            log.exception("Ralph propose modal failed for %s", self.slug)
            await interaction.response.send_message(
                f"Eroare la propunere: {e}", ephemeral=True
            )
        else:
            await interaction.response.send_message(outcome, ephemeral=True)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# RalphProjectView — per-project action buttons
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class RalphProjectView(discord.ui.View):
    """Per-project action buttons: Propune feature / Vezi PRD / Status /
    Aprobă / Stop / Planifică / Înapoi.

    Every callback that does I/O defers first, then uses followup.send —
    otherwise Discord's 3s interaction timeout shows "Interaction failed".
    """

    def __init__(self, slug: str) -> None:
        super().__init__(timeout=VIEW_TIMEOUT)
        self.slug = slug

    @discord.ui.button(label="➕ Propune feature", style=discord.ButtonStyle.primary, row=0)
    async def propose(self, interaction: discord.Interaction, button: discord.ui.Button) -> None:
        # Modals must be the immediate interaction response — no defer here.
        await interaction.response.send_modal(RalphProposeModal(self.slug))

    @discord.ui.button(label="👁 Vezi PRD", style=discord.ButtonStyle.secondary, row=0)
    async def view_prd(self, interaction: discord.Interaction, button: discord.ui.Button) -> None:
        """Show a compact per-story PRD summary (first 12 stories)."""
        await interaction.response.defer(ephemeral=True)
        prd = _read_prd(self.slug)
        if prd is None:
            await interaction.followup.send(
                f"Nu există PRD pentru {self.slug}. Aprobă cu /a {self.slug} și night-execute îl generează.",
                ephemeral=True,
            )
            return
        stories = prd.get("userStories", [])
        done = sum(1 for s in stories if s.get("passes"))
        total = len(stories)
        lines = [f"**PRD pentru {self.slug}** — {done}/{total} stories"]
        for s in stories[:12]:
            mark = "✅" if s.get("passes") else "⏳"
            sid = s.get("id", "?")
            title = (s.get("title") or "")[:80]
            lines.append(f"{mark} `{sid}` {title}")
        if total > 12:
            lines.append(f"\n…și încă {total - 12} stories.")
        await interaction.followup.send("\n".join(lines), ephemeral=True)

    @discord.ui.button(label="📊 Status", style=discord.ButtonStyle.secondary, row=0)
    async def status(self, interaction: discord.Interaction, button: discord.ui.Button) -> None:
        """Reply with the textual Ralph status for this project."""
        await interaction.response.defer(ephemeral=True)
        text = _ralph_status(self.slug)
        await interaction.followup.send(text, ephemeral=True)

    @discord.ui.button(label="✅ Aprobă pentru tonight", style=discord.ButtonStyle.success, row=1)
    async def approve(self, interaction: discord.Interaction, button: discord.ui.Button) -> None:
        """Mark this project approved so night-execute picks it up."""
        await interaction.response.defer(ephemeral=True)
        text = _ralph_approve([self.slug])
        await interaction.followup.send(text, ephemeral=True)

    @discord.ui.button(label="🛑 Stop", style=discord.ButtonStyle.danger, row=1)
    async def stop(self, interaction: discord.Interaction, button: discord.ui.Button) -> None:
        """Request a stop of the running Ralph loop for this project."""
        await interaction.response.defer(ephemeral=True)
        text = _ralph_stop(self.slug)
        await interaction.followup.send(text, ephemeral=True)

    @discord.ui.button(label="🧠 Planifică", style=discord.ButtonStyle.primary, row=2)
    async def plan(self, interaction: discord.Interaction, button: discord.ui.Button) -> None:
        """Start a conversational planning session for this project."""
        await interaction.response.defer(ephemeral=True)
        # Look up description from approved-tasks.json
        description = ""
        try:
            data = _load_approved_tasks()
            for p in data.get("projects", []):
                if p.get("name", "").lower() == self.slug.lower():
                    description = p.get("description") or ""
                    break
        except Exception:
            log.exception("approved-tasks lookup failed")
        if not description:
            await interaction.followup.send(
                f"Nu am descriere pentru `{self.slug}`. "
                f"Adaugă mai întâi cu `/p {self.slug} <descriere>`.",
                ephemeral=True,
            )
            return
        channel_id = str(interaction.channel_id)
        await interaction.followup.send(
            f"🧠 Pornesc planning pentru `{self.slug}`… (durează ~60s)",
            ephemeral=True,
        )
        try:
            # BUGFIX: start_planning_session is a long blocking call (~60s per
            # the message above); the /plan slash command already wraps it in
            # asyncio.to_thread — do the same here so the event loop stays free.
            first = await asyncio.to_thread(
                start_planning_session, self.slug, description, channel_id, "discord",
            )
        except Exception as e:
            log.exception("start_planning_session failed for %s", self.slug)
            await interaction.followup.send(f"Planning blocat: {e}", ephemeral=True)
            return
        # Send first message of the planning agent + active keyboard
        for chunk in _split_chunks(first, 1900):
            await interaction.followup.send(chunk, ephemeral=True)
        await interaction.followup.send(
            "Răspunde aici. Apasă **Continuă faza** când ești gata să trec la următoarea.",
            view=PlanningActiveView(),
            ephemeral=True,
        )

    @discord.ui.button(label="🔙 Înapoi", style=discord.ButtonStyle.secondary, row=2)
    async def back(self, interaction: discord.Interaction, button: discord.ui.Button) -> None:
        """Return to the root project list, editing the original message."""
        await interaction.response.defer(ephemeral=True)
        view = RalphRootView()
        content = view.render_summary()
        await interaction.edit_original_response(content=content, view=view)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# RalphRootView — workspace + active projects landing
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class _ProjectButton(discord.ui.Button):
    """Dynamic button representing a single project; opens RalphProjectView."""

    def __init__(self, slug: str, status: str | None, row: int) -> None:
        super().__init__(
            label=_project_label(slug, status),
            style=discord.ButtonStyle.secondary,
            row=row,
            custom_id=f"ralph_project:{slug}",
        )
        self.slug = slug

    async def callback(self, interaction: discord.Interaction) -> None:
        """Swap the message in place for the per-project action view."""
        await interaction.response.defer(ephemeral=True)
        project_view = RalphProjectView(self.slug)
        await interaction.edit_original_response(
            content=f"**{self.slug}**\nAlege o acțiune:", view=project_view
        )
|
||||
|
||||
|
||||
class _RefreshButton(discord.ui.Button):
    """Bottom-row button that rebuilds the root view with fresh statuses."""

    def __init__(self) -> None:
        super().__init__(
            label="🔄 Reîncarcă",
            style=discord.ButtonStyle.primary,
            row=4,
            custom_id="ralph_refresh",
        )

    async def callback(self, interaction: discord.Interaction) -> None:
        await interaction.response.defer(ephemeral=True)
        fresh = RalphRootView()
        await interaction.edit_original_response(
            content=fresh.render_summary(), view=fresh
        )
|
||||
|
||||
|
||||
class _CloseButton(discord.ui.Button):
    """Bottom-row button that dismisses the interactive message."""

    def __init__(self) -> None:
        super().__init__(
            label="❌ Închide",
            style=discord.ButtonStyle.danger,
            row=4,
            custom_id="ralph_close",
        )

    async def callback(self, interaction: discord.Interaction) -> None:
        await interaction.response.defer(ephemeral=True)
        # Drop the view entirely so no stale buttons remain clickable.
        await interaction.edit_original_response(content="Închis.", view=None)
|
||||
|
||||
|
||||
def _split_chunks(text: str, limit: int = 1900) -> list[str]:
|
||||
"""Split a long message into Discord-safe chunks."""
|
||||
if len(text) <= limit:
|
||||
return [text]
|
||||
chunks: list[str] = []
|
||||
while text:
|
||||
if len(text) <= limit:
|
||||
chunks.append(text)
|
||||
break
|
||||
cut = text.rfind("\n", 0, limit)
|
||||
if cut == -1:
|
||||
cut = limit
|
||||
chunks.append(text[:cut])
|
||||
text = text[cut:].lstrip("\n")
|
||||
return chunks
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Planning views (W2) — buttons that drive the planning conversation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class PlanningActiveView(discord.ui.View):
    """Buttons shown DURING an active planning session: advance phase / cancel.

    Callbacks defer first (Discord's 3s timeout), then followup.send.
    """

    def __init__(self) -> None:
        super().__init__(timeout=VIEW_TIMEOUT)

    @discord.ui.button(label="▶️ Continuă faza", style=discord.ButtonStyle.primary, row=0)
    async def advance(self, interaction: discord.Interaction, button: discord.ui.Button) -> None:
        """Advance to the next planning phase and show the agent's reply."""
        await interaction.response.defer(ephemeral=True)
        channel_id = str(interaction.channel_id)
        try:
            # BUGFIX: planning_advance is a blocking call (the slash-command
            # path runs it via asyncio.to_thread); keep it off the event loop.
            text, completed = await asyncio.to_thread(
                planning_advance, channel_id, "discord",
            )
        except Exception as e:
            log.exception("planning advance failed")
            await interaction.followup.send(f"Eroare: {e}", ephemeral=True)
            return
        for chunk in _split_chunks(text):
            await interaction.followup.send(chunk, ephemeral=True)
        # Final phase gets the approve/cancel keyboard; otherwise stay active.
        view: discord.ui.View = (
            PlanningFinalView() if completed else PlanningActiveView()
        )
        await interaction.followup.send(
            ("Plan gata. Confirmi?" if completed else "Continuăm?"),
            view=view, ephemeral=True,
        )

    @discord.ui.button(label="🛑 Anulează", style=discord.ButtonStyle.danger, row=0)
    async def cancel(self, interaction: discord.Interaction, button: discord.ui.Button) -> None:
        """Abort the planning session bound to this channel."""
        await interaction.response.defer(ephemeral=True)
        # Same blocking-call concern as advance — run off the event loop.
        text = await asyncio.to_thread(
            planning_cancel, str(interaction.channel_id), "discord",
        )
        await interaction.followup.send(text, ephemeral=True)
|
||||
|
||||
|
||||
class PlanningFinalView(discord.ui.View):
    """Buttons shown when ALL planning phases finished — Dau drumul / Anulează."""

    def __init__(self) -> None:
        super().__init__(timeout=VIEW_TIMEOUT)

    @discord.ui.button(label="✅ Dau drumul tonight", style=discord.ButtonStyle.success, row=0)
    async def approve(self, interaction: discord.Interaction, button: discord.ui.Button) -> None:
        """Approve the finished plan for tonight's execution."""
        await interaction.response.defer(ephemeral=True)
        # BUGFIX: planning_approve/planning_cancel are blocking calls (the
        # slash-command path uses asyncio.to_thread) — keep them off the loop.
        text = await asyncio.to_thread(
            planning_approve, str(interaction.channel_id), "discord",
        )
        await interaction.followup.send(text, ephemeral=True)

    @discord.ui.button(label="🛑 Anulează", style=discord.ButtonStyle.danger, row=0)
    async def cancel(self, interaction: discord.Interaction, button: discord.ui.Button) -> None:
        """Abort the planning session instead of approving it."""
        await interaction.response.defer(ephemeral=True)
        text = await asyncio.to_thread(
            planning_cancel, str(interaction.channel_id), "discord",
        )
        await interaction.followup.send(text, ephemeral=True)
|
||||
|
||||
|
||||
class RalphRootView(discord.ui.View):
    """Landing view: workspace projects with status emoji + refresh + close."""

    def __init__(self) -> None:
        super().__init__(timeout=VIEW_TIMEOUT)
        self._build_buttons()

    def _build_buttons(self) -> None:
        """Populate rows 0-3 with project buttons; row 4 holds refresh/close."""
        # Discord limit: 5 buttons per row × 5 rows. The last row is reserved
        # for refresh/close, so at most 20 project buttons fit on rows 0-3.
        for idx, slug in enumerate(_list_workspace_projects()[:20]):
            self.add_item(_ProjectButton(slug, _project_status(slug), row=idx // 5))
        self.add_item(_RefreshButton())
        self.add_item(_CloseButton())

    def render_summary(self) -> str:
        """Build the message text shown above the buttons."""
        try:
            data = _load_approved_tasks()
        except Exception:
            data = {"projects": []}
        active = [
            entry for entry in data.get("projects", [])
            if entry.get("status") in ("pending", "approved", "running")
        ]
        lines = ["📋 **Proiecte Ralph**"]
        if active:
            lines += ["", "**Active:**"]
            for entry in active[:10]:
                emoji = _STATUS_EMOJI.get(entry.get("status", ""), "·")
                desc = (entry.get("description") or "")[:60]
                lines.append(f"{emoji} `{entry.get('name')}` — {desc}")
        lines += ["", "Apasă pe un proiect pentru acțiuni, sau Reîncarcă pentru status fresh."]
        return "\n".join(lines)
||||
@@ -1,9 +1,17 @@
|
||||
"""Telegram bot adapter — commands and message handlers."""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
from telegram import BotCommand, Update, InlineKeyboardButton, InlineKeyboardMarkup
|
||||
from telegram import (
|
||||
BotCommand,
|
||||
ForceReply,
|
||||
InlineKeyboardButton,
|
||||
InlineKeyboardMarkup,
|
||||
Update,
|
||||
)
|
||||
from telegram.constants import ChatAction, ChatType
|
||||
from telegram.ext import (
|
||||
Application,
|
||||
@@ -22,13 +30,32 @@ from src.claude_session import (
|
||||
VALID_MODELS,
|
||||
)
|
||||
from src.fast_commands import dispatch as fast_dispatch
|
||||
from src import ralph_flow
|
||||
from src.router import (
|
||||
route_message,
|
||||
_load_approved_tasks,
|
||||
_ralph_propose,
|
||||
_ralph_approve,
|
||||
_ralph_status,
|
||||
_ralph_stop,
|
||||
planning_advance,
|
||||
planning_approve,
|
||||
planning_cancel,
|
||||
start_planning_session,
|
||||
)
|
||||
from src.planning_session import is_in_planning
|
||||
|
||||
WORKSPACE_DIR = Path("/home/moltbot/workspace")
|
||||
ADAPTER_NAME = "telegram"
|
||||
|
||||
_RALPH_STATUS_EMOJI = {
|
||||
"pending": "📋",
|
||||
"approved": "⏳",
|
||||
"running": "🟢",
|
||||
"complete": "✅",
|
||||
"failed": "❌",
|
||||
"stopped": "⏹",
|
||||
}
|
||||
|
||||
logger = logging.getLogger("echo-core.telegram")
|
||||
_security_log = logging.getLogger("echo-core.security")
|
||||
@@ -339,12 +366,124 @@ async def cmd_ralph_a(update: Update, context: ContextTypes.DEFAULT_TYPE) -> Non
|
||||
await update.message.reply_text(result)
|
||||
|
||||
|
||||
def _list_workspace_projects() -> list[str]:
|
||||
if not WORKSPACE_DIR.exists():
|
||||
return []
|
||||
return sorted(
|
||||
p.name for p in WORKSPACE_DIR.iterdir()
|
||||
if p.is_dir() and not p.name.startswith(".")
|
||||
)
|
||||
|
||||
|
||||
def _project_status_map() -> dict[str, str]:
|
||||
"""Return {slug: status} from approved-tasks.json."""
|
||||
try:
|
||||
data = _load_approved_tasks()
|
||||
except Exception:
|
||||
return {}
|
||||
return {p.get("name", ""): p.get("status", "") for p in data.get("projects", [])}
|
||||
|
||||
|
||||
def _build_ralph_root_keyboard() -> InlineKeyboardMarkup:
|
||||
"""Build the /l landing keyboard: project rows + refresh/close."""
|
||||
statuses = _project_status_map()
|
||||
rows: list[list[InlineKeyboardButton]] = []
|
||||
current_row: list[InlineKeyboardButton] = []
|
||||
for slug in _list_workspace_projects():
|
||||
emoji = _RALPH_STATUS_EMOJI.get(statuses.get(slug, ""), "·")
|
||||
current_row.append(
|
||||
InlineKeyboardButton(
|
||||
f"{emoji} {slug}",
|
||||
callback_data=f"ralph:project:{slug}",
|
||||
)
|
||||
)
|
||||
if len(current_row) == 2:
|
||||
rows.append(current_row)
|
||||
current_row = []
|
||||
if current_row:
|
||||
rows.append(current_row)
|
||||
rows.append([
|
||||
InlineKeyboardButton("🔄 Reîncarcă", callback_data="ralph:refresh"),
|
||||
InlineKeyboardButton("❌ Închide", callback_data="ralph:close"),
|
||||
])
|
||||
return InlineKeyboardMarkup(rows)
|
||||
|
||||
|
||||
def _build_ralph_project_keyboard(slug: str) -> InlineKeyboardMarkup:
|
||||
return InlineKeyboardMarkup([
|
||||
[
|
||||
InlineKeyboardButton("➕ Propune feature", callback_data=f"ralph:propose:{slug}"),
|
||||
InlineKeyboardButton("🧠 Planifică", callback_data=f"ralph:plan:{slug}"),
|
||||
],
|
||||
[
|
||||
InlineKeyboardButton("👁 Vezi PRD", callback_data=f"ralph:prd:{slug}"),
|
||||
InlineKeyboardButton("📊 Status", callback_data=f"ralph:status:{slug}"),
|
||||
],
|
||||
[
|
||||
InlineKeyboardButton("✅ Aprobă tonight", callback_data=f"ralph:approve:{slug}"),
|
||||
InlineKeyboardButton("🛑 Stop", callback_data=f"ralph:stop:{slug}"),
|
||||
],
|
||||
[
|
||||
InlineKeyboardButton("🔙 Înapoi", callback_data="ralph:menu"),
|
||||
],
|
||||
])
|
||||
|
||||
|
||||
def _build_planning_active_keyboard() -> InlineKeyboardMarkup:
|
||||
"""Keyboard shown DURING an active planning session (after each turn)."""
|
||||
return InlineKeyboardMarkup([
|
||||
[
|
||||
InlineKeyboardButton("▶️ Continuă faza", callback_data="ralph:planadvance"),
|
||||
InlineKeyboardButton("🛑 Anulează", callback_data="ralph:plancancel"),
|
||||
],
|
||||
])
|
||||
|
||||
|
||||
def _build_planning_final_keyboard() -> InlineKeyboardMarkup:
|
||||
"""Keyboard shown when the planning pipeline has finished all phases."""
|
||||
return InlineKeyboardMarkup([
|
||||
[
|
||||
InlineKeyboardButton("✅ Dau drumul tonight", callback_data="ralph:planapprove"),
|
||||
InlineKeyboardButton("🛑 Anulează", callback_data="ralph:plancancel"),
|
||||
],
|
||||
])
|
||||
|
||||
|
||||
def _render_ralph_root_summary() -> str:
|
||||
try:
|
||||
data = _load_approved_tasks()
|
||||
except Exception:
|
||||
data = {"projects": []}
|
||||
active = [
|
||||
p for p in data.get("projects", [])
|
||||
if p.get("status") in ("pending", "approved", "running")
|
||||
]
|
||||
lines = ["📋 *Proiecte Ralph*"]
|
||||
if active:
|
||||
lines.append("")
|
||||
lines.append("*Active:*")
|
||||
for p in active[:10]:
|
||||
emoji = _RALPH_STATUS_EMOJI.get(p.get("status", ""), "·")
|
||||
desc = (p.get("description") or "")[:60]
|
||||
lines.append(f"{emoji} `{p.get('name')}` — {desc}")
|
||||
lines.append("")
|
||||
lines.append("Apasă pe un proiect pentru acțiuni.")
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
async def cmd_ralph_l(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
||||
"""/l — status proiecte Ralph."""
|
||||
"""/l — listă interactivă proiecte Ralph."""
|
||||
args = list(context.args or [])
|
||||
filter_slug = args[0].lower() if args else None
|
||||
result = await asyncio.to_thread(_ralph_status, filter_slug)
|
||||
await update.message.reply_text(result)
|
||||
if args:
|
||||
filter_slug = args[0].lower()
|
||||
result = await asyncio.to_thread(_ralph_status, filter_slug)
|
||||
await update.message.reply_text(result)
|
||||
return
|
||||
await update.message.reply_text(
|
||||
_render_ralph_root_summary(),
|
||||
reply_markup=_build_ralph_root_keyboard(),
|
||||
parse_mode="Markdown",
|
||||
)
|
||||
|
||||
|
||||
async def cmd_ralph_k(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
||||
@@ -357,6 +496,247 @@ async def cmd_ralph_k(update: Update, context: ContextTypes.DEFAULT_TYPE) -> Non
|
||||
await update.message.reply_text(result)
|
||||
|
||||
|
||||
def split_planning_chunks(text: str, limit: int = 4096) -> list[str]:
|
||||
"""Telegram-safe split (mirrors split_message but local to avoid forward ref)."""
|
||||
if len(text) <= limit:
|
||||
return [text]
|
||||
chunks = []
|
||||
while text:
|
||||
if len(text) <= limit:
|
||||
chunks.append(text)
|
||||
break
|
||||
cut = text.rfind("\n", 0, limit)
|
||||
if cut == -1:
|
||||
cut = limit
|
||||
chunks.append(text[:cut])
|
||||
text = text[cut:].lstrip("\n")
|
||||
return chunks
|
||||
|
||||
|
||||
async def cmd_plan(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
||||
"""/plan <slug> [descriere] — pornește o sesiune de planning conversational."""
|
||||
args = list(context.args or [])
|
||||
if not args:
|
||||
await update.message.reply_text("Folosire: /plan <slug> [descriere]")
|
||||
return
|
||||
slug = args[0]
|
||||
description = " ".join(args[1:]).strip()
|
||||
if not description:
|
||||
# Look up from approved-tasks
|
||||
try:
|
||||
data = _load_approved_tasks()
|
||||
except Exception:
|
||||
data = {"projects": []}
|
||||
for p in data.get("projects", []):
|
||||
if p.get("name", "").lower() == slug.lower():
|
||||
description = p.get("description") or ""
|
||||
break
|
||||
if not description:
|
||||
await update.message.reply_text(
|
||||
f"Nu am descriere pentru `{slug}`. Adaugă cu /p {slug} <descriere>.",
|
||||
parse_mode="Markdown",
|
||||
)
|
||||
return
|
||||
chat_id = update.message.chat_id
|
||||
await update.message.reply_text(
|
||||
f"🧠 Pornesc planning pentru *{slug}*… (durează ~60s)",
|
||||
parse_mode="Markdown",
|
||||
)
|
||||
first = await asyncio.to_thread(
|
||||
start_planning_session, slug, description, str(chat_id), ADAPTER_NAME,
|
||||
)
|
||||
for chunk in split_planning_chunks(first):
|
||||
await context.bot.send_message(chat_id=chat_id, text=chunk)
|
||||
await context.bot.send_message(
|
||||
chat_id=chat_id,
|
||||
text="Răspunde aici. Apasă _Continuă faza_ când ești gata să trec la următoarea.",
|
||||
reply_markup=_build_planning_active_keyboard(),
|
||||
parse_mode="Markdown",
|
||||
)
|
||||
|
||||
|
||||
async def cmd_cancel_planning(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
||||
"""/cancel — anulează sesiunea de planning curentă."""
|
||||
text = await asyncio.to_thread(
|
||||
planning_cancel, str(update.message.chat_id), ADAPTER_NAME,
|
||||
)
|
||||
await update.message.reply_text(text)
|
||||
|
||||
|
||||
async def callback_ralph(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
||||
"""Handle inline keyboard callbacks for Ralph (pattern ^ralph:)."""
|
||||
query = update.callback_query
|
||||
await query.answer()
|
||||
|
||||
data = query.data or ""
|
||||
parts = data.split(":", 2)
|
||||
if len(parts) < 2 or parts[0] != "ralph":
|
||||
return
|
||||
action = parts[1]
|
||||
slug = parts[2] if len(parts) > 2 else None
|
||||
|
||||
chat_id = str(query.message.chat_id)
|
||||
user_id = str(query.from_user.id) if query.from_user else "0"
|
||||
|
||||
if action == "menu" or action == "refresh":
|
||||
try:
|
||||
await query.edit_message_text(
|
||||
_render_ralph_root_summary(),
|
||||
reply_markup=_build_ralph_root_keyboard(),
|
||||
parse_mode="Markdown",
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Failed to refresh ralph menu")
|
||||
return
|
||||
|
||||
if action == "close":
|
||||
try:
|
||||
await query.edit_message_text("Închis.", reply_markup=None)
|
||||
except Exception:
|
||||
logger.exception("Failed to close ralph menu")
|
||||
return
|
||||
|
||||
if not slug:
|
||||
return
|
||||
|
||||
if action == "project":
|
||||
try:
|
||||
await query.edit_message_text(
|
||||
f"*{slug}*\nAlege o acțiune:",
|
||||
reply_markup=_build_ralph_project_keyboard(slug),
|
||||
parse_mode="Markdown",
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Failed to open ralph project menu")
|
||||
return
|
||||
|
||||
if action == "propose":
|
||||
# Set state then prompt with ForceReply for description
|
||||
ralph_flow.set_state(
|
||||
ADAPTER_NAME, chat_id, user_id,
|
||||
step=ralph_flow.STEP_INPUT_DESCRIPTION,
|
||||
project=slug,
|
||||
)
|
||||
await context.bot.send_message(
|
||||
chat_id=int(chat_id),
|
||||
text=f"📝 Descriere pentru *{slug}* (1-3 propoziții):",
|
||||
reply_markup=ForceReply(selective=True),
|
||||
parse_mode="Markdown",
|
||||
)
|
||||
return
|
||||
|
||||
if action == "prd":
|
||||
prd_path = WORKSPACE_DIR / slug / "scripts" / "ralph" / "prd.json"
|
||||
if not prd_path.exists():
|
||||
await context.bot.send_message(
|
||||
chat_id=int(chat_id),
|
||||
text=f"Nu există PRD pentru `{slug}`. Aprobă-l și night-execute îl generează.",
|
||||
parse_mode="Markdown",
|
||||
)
|
||||
return
|
||||
try:
|
||||
prd = json.loads(prd_path.read_text(encoding="utf-8"))
|
||||
except (ValueError, OSError) as e:
|
||||
await context.bot.send_message(chat_id=int(chat_id), text=f"PRD corupt: {e}")
|
||||
return
|
||||
stories = prd.get("userStories", [])
|
||||
done = sum(1 for s in stories if s.get("passes"))
|
||||
lines = [f"*PRD pentru {slug}* — {done}/{len(stories)} stories"]
|
||||
for s in stories[:12]:
|
||||
mark = "✅" if s.get("passes") else "⏳"
|
||||
sid = s.get("id", "?")
|
||||
title = (s.get("title") or "")[:80]
|
||||
lines.append(f"{mark} `{sid}` {title}")
|
||||
if len(stories) > 12:
|
||||
lines.append(f"\n…și încă {len(stories) - 12} stories.")
|
||||
await context.bot.send_message(
|
||||
chat_id=int(chat_id),
|
||||
text="\n".join(lines),
|
||||
parse_mode="Markdown",
|
||||
)
|
||||
return
|
||||
|
||||
if action == "status":
|
||||
result = await asyncio.to_thread(_ralph_status, slug)
|
||||
await context.bot.send_message(chat_id=int(chat_id), text=result)
|
||||
return
|
||||
|
||||
if action == "approve":
|
||||
result = await asyncio.to_thread(_ralph_approve, [slug])
|
||||
await context.bot.send_message(chat_id=int(chat_id), text=result)
|
||||
return
|
||||
|
||||
if action == "stop":
|
||||
result = await asyncio.to_thread(_ralph_stop, slug)
|
||||
await context.bot.send_message(chat_id=int(chat_id), text=result)
|
||||
return
|
||||
|
||||
# ---- Planning agent (W2) ---------------------------------------------
|
||||
if action == "plan":
|
||||
# Look up project description from approved-tasks.json (or workspace fallback).
|
||||
try:
|
||||
data = _load_approved_tasks()
|
||||
except Exception:
|
||||
data = {"projects": []}
|
||||
description = ""
|
||||
for p in data.get("projects", []):
|
||||
if p.get("name", "").lower() == (slug or "").lower():
|
||||
description = p.get("description") or ""
|
||||
break
|
||||
if not description:
|
||||
await context.bot.send_message(
|
||||
chat_id=int(chat_id),
|
||||
text=(
|
||||
f"Nu am descriere pentru `{slug}`. "
|
||||
f"Adaugă mai întâi cu `/p {slug} <descriere>`."
|
||||
),
|
||||
parse_mode="Markdown",
|
||||
)
|
||||
return
|
||||
await context.bot.send_message(
|
||||
chat_id=int(chat_id),
|
||||
text=f"🧠 Pornesc planning pentru *{slug}*… (durează ~60s)",
|
||||
parse_mode="Markdown",
|
||||
)
|
||||
first = await asyncio.to_thread(
|
||||
start_planning_session, slug, description, str(chat_id), ADAPTER_NAME,
|
||||
)
|
||||
for chunk in split_message(first):
|
||||
await context.bot.send_message(chat_id=int(chat_id), text=chunk)
|
||||
await context.bot.send_message(
|
||||
chat_id=int(chat_id),
|
||||
text="Răspunde aici. Apasă _Continuă faza_ când ești gata să trec la următoarea.",
|
||||
reply_markup=_build_planning_active_keyboard(),
|
||||
parse_mode="Markdown",
|
||||
)
|
||||
return
|
||||
|
||||
if action == "planadvance":
|
||||
await context.bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
|
||||
text, completed = await asyncio.to_thread(
|
||||
planning_advance, str(chat_id), ADAPTER_NAME,
|
||||
)
|
||||
for chunk in split_message(text):
|
||||
await context.bot.send_message(chat_id=int(chat_id), text=chunk)
|
||||
kb = _build_planning_final_keyboard() if completed else _build_planning_active_keyboard()
|
||||
await context.bot.send_message(
|
||||
chat_id=int(chat_id),
|
||||
text=("Plan gata. Confirmi?" if completed else "Continuăm?"),
|
||||
reply_markup=kb,
|
||||
)
|
||||
return
|
||||
|
||||
if action == "plancancel":
|
||||
text = await asyncio.to_thread(planning_cancel, str(chat_id), ADAPTER_NAME)
|
||||
await context.bot.send_message(chat_id=int(chat_id), text=text)
|
||||
return
|
||||
|
||||
if action == "planapprove":
|
||||
text = await asyncio.to_thread(planning_approve, str(chat_id), ADAPTER_NAME)
|
||||
await context.bot.send_message(chat_id=int(chat_id), text=text)
|
||||
return
|
||||
|
||||
|
||||
# --- Fast command handlers ---
|
||||
|
||||
|
||||
@@ -535,6 +915,17 @@ async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) ->
|
||||
chat_id, text[:100],
|
||||
)
|
||||
|
||||
# Ralph multi-step state: if user is replying to a "Descriere pentru X" prompt,
|
||||
# route this text to _ralph_propose instead of Claude.
|
||||
state = ralph_flow.get_state(ADAPTER_NAME, str(chat_id), str(user_id))
|
||||
if state and state.get("step") == ralph_flow.STEP_INPUT_DESCRIPTION:
|
||||
slug = state.get("project")
|
||||
if slug:
|
||||
ralph_flow.clear_state(ADAPTER_NAME, str(chat_id), str(user_id))
|
||||
result = await asyncio.to_thread(_ralph_propose, slug, text)
|
||||
await message.reply_text(result)
|
||||
return
|
||||
|
||||
# Show typing indicator
|
||||
await context.bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
|
||||
|
||||
@@ -556,6 +947,7 @@ async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) ->
|
||||
response, _is_cmd = await asyncio.to_thread(
|
||||
route_message, str(chat_id), str(user_id), text,
|
||||
on_text=on_text,
|
||||
adapter_name=ADAPTER_NAME,
|
||||
)
|
||||
|
||||
# Only send combined response if no intermediates were delivered
|
||||
@@ -586,6 +978,7 @@ def create_telegram_bot(config: Config, token: str) -> Application:
|
||||
app.add_handler(CommandHandler("model", cmd_model))
|
||||
app.add_handler(CommandHandler("register", cmd_register))
|
||||
app.add_handler(CallbackQueryHandler(callback_model, pattern="^model:"))
|
||||
app.add_handler(CallbackQueryHandler(callback_ralph, pattern="^ralph:"))
|
||||
|
||||
# Ralph commands
|
||||
app.add_handler(CommandHandler("p", cmd_ralph_p))
|
||||
@@ -593,6 +986,10 @@ def create_telegram_bot(config: Config, token: str) -> Application:
|
||||
app.add_handler(CommandHandler("l", cmd_ralph_l))
|
||||
app.add_handler(CommandHandler("k", cmd_ralph_k))
|
||||
|
||||
# Planning agent (W2)
|
||||
app.add_handler(CommandHandler("plan", cmd_plan))
|
||||
app.add_handler(CommandHandler("cancel", cmd_cancel_planning))
|
||||
|
||||
# Fast commands
|
||||
app.add_handler(CommandHandler("email", cmd_email))
|
||||
app.add_handler(CommandHandler("emailsend", cmd_emailsend))
|
||||
@@ -647,6 +1044,8 @@ def create_telegram_bot(config: Config, token: str) -> Application:
|
||||
BotCommand("a", "Ralph: approve project for tonight"),
|
||||
BotCommand("l", "Ralph: list projects status"),
|
||||
BotCommand("k", "Ralph: stop running project"),
|
||||
BotCommand("plan", "Planning conversational pentru un proiect"),
|
||||
BotCommand("cancel", "Anulează planning în curs"),
|
||||
])
|
||||
|
||||
app.post_init = post_init
|
||||
|
||||
@@ -226,6 +226,7 @@ async def handle_incoming(msg: dict, client: httpx.AsyncClient) -> None:
|
||||
response, _is_cmd = await asyncio.to_thread(
|
||||
route_message, channel_id, user_id, text,
|
||||
on_text=on_text,
|
||||
adapter_name="whatsapp",
|
||||
)
|
||||
# Only send combined response if no intermediates were delivered
|
||||
if sent_count == 0:
|
||||
|
||||
@@ -227,6 +227,7 @@ def _run_claude(
|
||||
cmd: list[str],
|
||||
timeout: int,
|
||||
on_text: Callable[[str], None] | None = None,
|
||||
cwd: Path | str | None = None,
|
||||
) -> dict:
|
||||
"""Run a Claude CLI command and return parsed output.
|
||||
|
||||
@@ -237,6 +238,10 @@ def _run_claude(
|
||||
If *on_text* is provided it is called with each intermediate text block
|
||||
as soon as it arrives (before the process finishes), enabling real-time
|
||||
streaming to adapters.
|
||||
|
||||
*cwd* — optional working directory override (default: PROJECT_ROOT).
|
||||
Used by PlanningSession to scope the subprocess to ``~/workspace/<slug>/``
|
||||
so artifacts land in the target repo.
|
||||
"""
|
||||
if not shutil.which(CLAUDE_BIN):
|
||||
raise FileNotFoundError(
|
||||
@@ -250,7 +255,7 @@ def _run_claude(
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
env=_safe_env(),
|
||||
cwd=PROJECT_ROOT,
|
||||
cwd=str(cwd) if cwd else PROJECT_ROOT,
|
||||
)
|
||||
|
||||
# Watchdog thread: kill the process if it exceeds the timeout
|
||||
@@ -346,6 +351,10 @@ def _run_claude(
|
||||
"duration_ms": result_obj.get("duration_ms", 0),
|
||||
"num_turns": result_obj.get("num_turns", 0),
|
||||
"intermediate_count": intermediate_count,
|
||||
# Surface subtype/is_error for callers that retry on `error_max_turns`
|
||||
# (PlanningSession does this — spike findings recommended retry strategy).
|
||||
"subtype": result_obj.get("subtype", ""),
|
||||
"is_error": bool(result_obj.get("is_error", False)),
|
||||
}
|
||||
|
||||
|
||||
|
||||
299
src/planning_orchestrator.py
Normal file
299
src/planning_orchestrator.py
Normal file
@@ -0,0 +1,299 @@
|
||||
"""PlanningOrchestrator — multi-phase planning coordinator.
|
||||
|
||||
Sequences `/office-hours → /plan-ceo-review → /plan-eng-review →
|
||||
/plan-design-review` (last only when description hints at UI scope), each in a
|
||||
**fresh subprocess** (per W2 plan + spike findings). Phases coordinate via
|
||||
disk artifacts (gstack convention: `~/.gstack/projects/<slug>/...`).
|
||||
|
||||
API used by router/adapters:
|
||||
|
||||
PlanningOrchestrator.start(slug, description, channel_id, adapter)
|
||||
→ (session, first_response_text)
|
||||
|
||||
PlanningOrchestrator.respond(adapter, channel_id, message)
|
||||
→ (session, response_text, phase_ready: bool)
|
||||
|
||||
PlanningOrchestrator.advance(adapter, channel_id)
|
||||
→ (session, first_response_text, completed: bool)
|
||||
# completed=True when no further phase remains; final-plan stub
|
||||
# written by the orchestrator if the planning agent didn't.
|
||||
|
||||
PlanningOrchestrator.cancel(adapter, channel_id)
|
||||
→ bool
|
||||
|
||||
PlanningOrchestrator.has_ui_scope(description) → bool
|
||||
|
||||
The orchestrator writes the final-plan stub path even if planning agent did
|
||||
not (so PRD generator always has something to read in W3).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Callable
|
||||
|
||||
from src.planning_session import (
|
||||
GSTACK_PROJECTS_ROOT,
|
||||
PHASE_READY_MARKER,
|
||||
WORKSPACE_ROOT,
|
||||
PlanningSession,
|
||||
_load_planning_state,
|
||||
_save_planning_state,
|
||||
_channel_key,
|
||||
clear_planning_state,
|
||||
get_planning_state,
|
||||
)
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Ordered phase pipeline. The design phase is appended only when the
|
||||
# description hints at UI scope (heuristic — see has_ui_scope).
|
||||
BASE_PHASES = ["/office-hours", "/plan-ceo-review", "/plan-eng-review"]
|
||||
DESIGN_PHASE = "/plan-design-review"
|
||||
|
||||
UI_HINT_PATTERN = re.compile(
|
||||
r"\b(ui|ux|frontend|design|button|page|css|html|interfa[țt]?[aăă]?|"
|
||||
r"layout|component|view|dashboard|modal|form|screen|"
|
||||
# Romanian variants
|
||||
r"pagin[ăa]|buton|ecran|formular)",
|
||||
re.IGNORECASE,
|
||||
)
|
||||
|
||||
|
||||
def has_ui_scope(description: str) -> bool:
|
||||
"""Cheap heuristic — adds /plan-design-review when description mentions UI."""
|
||||
return bool(UI_HINT_PATTERN.search(description or ""))
|
||||
|
||||
|
||||
def _phases_for(description: str) -> list[str]:
|
||||
phases = list(BASE_PHASES)
|
||||
if has_ui_scope(description):
|
||||
phases.append(DESIGN_PHASE)
|
||||
return phases
|
||||
|
||||
|
||||
def _final_plan_path(slug: str) -> Path:
|
||||
return WORKSPACE_ROOT / slug / "scripts" / "ralph" / "final-plan.md"
|
||||
|
||||
|
||||
def _ensure_final_plan_dir(slug: str) -> Path:
|
||||
target = _final_plan_path(slug)
|
||||
target.parent.mkdir(parents=True, exist_ok=True)
|
||||
return target
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Orchestrator
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class PlanningOrchestrator:
|
||||
"""Stateless coordinator — all state is in `sessions/planning.json`."""
|
||||
|
||||
@staticmethod
|
||||
def start(
|
||||
slug: str,
|
||||
description: str,
|
||||
channel_id: str,
|
||||
adapter: str = "echo",
|
||||
on_text: Callable[[str], None] | None = None,
|
||||
) -> tuple[PlanningSession, str]:
|
||||
"""Begin planning at phase 0 (`/office-hours`). Persists state.
|
||||
|
||||
Returns (PlanningSession, first_response_text).
|
||||
"""
|
||||
phases = _phases_for(description)
|
||||
first_phase = phases[0]
|
||||
log.info(
|
||||
"planning.start slug=%s adapter=%s channel=%s phases=%s",
|
||||
slug, adapter, channel_id, phases,
|
||||
)
|
||||
# Wipe any prior state for this channel (start fresh).
|
||||
clear_planning_state(adapter, channel_id)
|
||||
|
||||
session = PlanningSession.start(
|
||||
slug=slug,
|
||||
description=description,
|
||||
phase=first_phase,
|
||||
channel_id=channel_id,
|
||||
adapter=adapter,
|
||||
on_text=on_text,
|
||||
)
|
||||
# Stash phase plan into disk state so advance() knows the pipeline.
|
||||
data = _load_planning_state()
|
||||
key = _channel_key(adapter, channel_id)
|
||||
if key in data:
|
||||
data[key]["phases_planned"] = phases
|
||||
data[key]["phase_index"] = 0
|
||||
data[key]["phases_completed"] = []
|
||||
_save_planning_state(data)
|
||||
return session, session.last_response
|
||||
|
||||
@staticmethod
|
||||
def respond(
|
||||
adapter: str,
|
||||
channel_id: str,
|
||||
message: str,
|
||||
on_text: Callable[[str], None] | None = None,
|
||||
) -> tuple[PlanningSession | None, str, bool]:
|
||||
"""Forward `message` into the active phase via `--resume`.
|
||||
|
||||
Returns (session, response_text, phase_ready).
|
||||
`phase_ready=True` means the planning agent emitted PHASE_READY_MARKER
|
||||
— the adapter should surface a "Continuă faza" / "Finalizează" button.
|
||||
"""
|
||||
session = PlanningSession.from_state(adapter, channel_id)
|
||||
if session is None:
|
||||
return None, "Nu există o sesiune de planning activă pe acest canal.", False
|
||||
text = session.respond(message, on_text=on_text)
|
||||
return session, text, session.is_phase_ready()
|
||||
|
||||
@staticmethod
|
||||
def advance(
|
||||
adapter: str,
|
||||
channel_id: str,
|
||||
on_text: Callable[[str], None] | None = None,
|
||||
) -> tuple[PlanningSession | None, str, bool]:
|
||||
"""Move to the next phase (fresh subprocess). Returns (session, text, completed).
|
||||
|
||||
If no more phases remain, writes a final-plan.md stub (if the agent
|
||||
didn't) and returns (last_session, summary_text, completed=True).
|
||||
"""
|
||||
state = get_planning_state(adapter, channel_id)
|
||||
if not state:
|
||||
return None, "Nu există o sesiune de planning activă.", False
|
||||
|
||||
phases = state.get("phases_planned") or _phases_for(state.get("description", ""))
|
||||
completed = list(state.get("phases_completed") or [])
|
||||
current_phase = state.get("phase")
|
||||
if current_phase and current_phase not in completed:
|
||||
completed.append(current_phase)
|
||||
|
||||
# Find next phase
|
||||
try:
|
||||
cur_idx = phases.index(current_phase) if current_phase in phases else -1
|
||||
except ValueError:
|
||||
cur_idx = -1
|
||||
next_idx = cur_idx + 1
|
||||
|
||||
slug = state["slug"]
|
||||
description = state.get("description", "")
|
||||
|
||||
if next_idx >= len(phases):
|
||||
# Pipeline complete — ensure final-plan.md exists, return summary.
|
||||
target = _ensure_final_plan_dir(slug)
|
||||
if not target.exists():
|
||||
stub = _build_final_plan_stub(slug, description, completed, state)
|
||||
target.write_text(stub, encoding="utf-8")
|
||||
log.info("planning.advance wrote final-plan stub: %s", target)
|
||||
# Persist completion marker but keep state so adapter can show
|
||||
# "Dau drumul tonight?" buttons.
|
||||
data = _load_planning_state()
|
||||
key = _channel_key(adapter, channel_id)
|
||||
if key in data:
|
||||
data[key]["phases_completed"] = completed
|
||||
data[key]["phase"] = "__complete__"
|
||||
data[key]["final_plan_path"] = str(target)
|
||||
_save_planning_state(data)
|
||||
session = PlanningSession.from_state(adapter, channel_id)
|
||||
summary = _summary_text(slug, completed, target)
|
||||
return session, summary, True
|
||||
|
||||
next_phase = phases[next_idx]
|
||||
log.info(
|
||||
"planning.advance slug=%s adapter=%s channel=%s %s → %s",
|
||||
slug, adapter, channel_id, current_phase, next_phase,
|
||||
)
|
||||
# Fresh subprocess for the next phase. Phase coordinates with prior
|
||||
# phase via gstack disk artifacts (~/.gstack/projects/<slug>/).
|
||||
session = PlanningSession.start(
|
||||
slug=slug,
|
||||
description=description,
|
||||
phase=next_phase,
|
||||
channel_id=channel_id,
|
||||
adapter=adapter,
|
||||
on_text=on_text,
|
||||
)
|
||||
data = _load_planning_state()
|
||||
key = _channel_key(adapter, channel_id)
|
||||
if key in data:
|
||||
data[key]["phases_completed"] = completed
|
||||
data[key]["phase_index"] = next_idx
|
||||
data[key]["phases_planned"] = phases
|
||||
_save_planning_state(data)
|
||||
return session, session.last_response, False
|
||||
|
||||
@staticmethod
|
||||
def cancel(adapter: str, channel_id: str) -> bool:
|
||||
"""Drop planning state. Returns True if anything was cleared."""
|
||||
return clear_planning_state(adapter, channel_id)
|
||||
|
||||
@staticmethod
|
||||
def final_plan_path(slug: str) -> Path:
|
||||
return _final_plan_path(slug)
|
||||
|
||||
# Re-exported for convenience.
|
||||
has_ui_scope = staticmethod(has_ui_scope)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _summary_text(slug: str, completed_phases: list[str], plan_path: Path) -> str:
|
||||
phases_str = " → ".join(completed_phases) if completed_phases else "(nicio fază)"
|
||||
return (
|
||||
f"✅ **Plan gata pentru `{slug}`**\n\n"
|
||||
f"Faze rulate: {phases_str}\n"
|
||||
f"Plan salvat: `{plan_path}`\n\n"
|
||||
"Apasă **Dau drumul tonight** ca Ralph să-l implementeze la 23:00, "
|
||||
"sau **Anulează** dacă vrei să mai gândim."
|
||||
)
|
||||
|
||||
|
||||
def _build_final_plan_stub(
|
||||
slug: str, description: str, completed_phases: list[str], state: dict
|
||||
) -> str:
|
||||
"""Emit a minimal final-plan.md when the planning agent didn't write one.
|
||||
|
||||
Captures what we know so PRD generator (W3) has something concrete to read.
|
||||
"""
|
||||
phases_lines = "\n".join(f"- `{p}`" for p in completed_phases) or "- (none)"
|
||||
last_excerpt = (state.get("last_text_excerpt") or "").strip()
|
||||
last_block = (
|
||||
f"\n\n## Last agent output excerpt\n\n```\n{last_excerpt[:2000]}\n```"
|
||||
if last_excerpt
|
||||
else ""
|
||||
)
|
||||
return f"""# Final plan — {slug}
|
||||
|
||||
## Context
|
||||
|
||||
{description}
|
||||
|
||||
## Phases completed
|
||||
|
||||
{phases_lines}
|
||||
|
||||
## Architecture overview
|
||||
|
||||
(To be filled by planning agent. Stub written by PlanningOrchestrator because the
|
||||
agent didn't write its own `final-plan.md` before pipeline completion.)
|
||||
|
||||
## User stories preliminare
|
||||
|
||||
(Stub. Ralph PRD generator will infer concrete stories from this plan + repo state.)
|
||||
|
||||
## Implementation hints
|
||||
|
||||
(Stub.)
|
||||
|
||||
## Verification approach
|
||||
|
||||
- typecheck + lint + tests pe modulele atinse
|
||||
- smart gates Ralph pe tags inferred per story
|
||||
{last_block}
|
||||
"""
|
||||
495
src/planning_session.py
Normal file
495
src/planning_session.py
Normal file
@@ -0,0 +1,495 @@
|
||||
"""PlanningSession — Claude CLI wrapper for conversational planning phases.
|
||||
|
||||
Per the Echo Core conversational planning agent plan (W2), this is intentionally
|
||||
a SEPARATE class from the chat session — NOT a `mode=string` parameter on
|
||||
`ClaudeSession`. The plan calls it "PlanningSession(ClaudeSession) ca SUBCLASĂ".
|
||||
Since `claude_session.py` exposes module-level functions (not a class) we
|
||||
implement PlanningSession as a sibling class that REUSES the shared subprocess
|
||||
helpers (`_run_claude`, `_safe_env`, `CLAUDE_BIN`, `SESSIONS_DIR`) but keeps:
|
||||
|
||||
- its own state file (`sessions/planning.json`)
|
||||
- its own system prompt (`prompts/planning_agent.md`)
|
||||
- per-slug working directory (`~/workspace/<slug>/`)
|
||||
- `--add-dir` flags for skills + gstack project artifacts
|
||||
- `--max-turns 20` default with retry on `error_max_turns`
|
||||
|
||||
Spike findings (`tasks/spike-planning-findings.md`):
|
||||
- `claude -p '/skill'` → text serialization of AskUserQuestion. ✅
|
||||
- `claude --resume <id> -p '<reply>'` round-trip preserves context. ✅
|
||||
- Complex prompts can blow turn budget → MUST handle `error_max_turns`.
|
||||
- Cost ~ $0.5–1.1/turn Opus 4.7 1M; Marius on subscription so ignore USD.
|
||||
|
||||
Architectural decisions captured for the W2 commit message:
|
||||
1. Separate class (not mode parameter) — clean separation, easy to remove
|
||||
planning entirely without touching chat session.
|
||||
2. Fresh subprocess PER skill phase, NOT a single resumed session — phases
|
||||
coordinate via disk artifacts (gstack convention:
|
||||
`~/.gstack/projects/<slug>/{user}-{branch}-{phase}-*.md`).
|
||||
3. State per `(adapter, channel)` keyed string — same convention as
|
||||
`claude_session.active.json`. Re-resume on restart is supported via
|
||||
`claude --resume <stored_id>`.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
import uuid
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Callable
|
||||
|
||||
from src.claude_session import (
|
||||
CLAUDE_BIN,
|
||||
PROJECT_ROOT,
|
||||
SESSIONS_DIR,
|
||||
_run_claude,
|
||||
_safe_env,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
_invoke_log = logging.getLogger("echo-core.invoke")
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Constants
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Persistent state + prompt template locations.
PLANNING_STATE_FILE = SESSIONS_DIR / "planning.json"
PROMPTS_DIR = PROJECT_ROOT / "prompts"
PLANNING_PROMPT_FILE = PROMPTS_DIR / "planning_agent.md"

# Roots scoped into each planning subprocess via --add-dir
# NOTE(review): WORKSPACE_ROOT is a host-specific absolute path — presumably
# the deployment user's workspace; confirm before running elsewhere.
WORKSPACE_ROOT = Path("/home/moltbot/workspace")
GSTACK_PROJECTS_ROOT = Path.home() / ".gstack" / "projects"
SKILLS_ROOT = Path.home() / ".claude" / "skills"

# Spike: prompts deep-tool-use can blow small budgets; 20 default with retry.
DEFAULT_MAX_TURNS = 20
RETRY_MAX_TURNS = 30  # boost on `error_max_turns`
DEFAULT_TIMEOUT = 600  # seconds — planning turns are slower than chat

# Marker the planning agent emits when a phase is conceptually done.
# Orchestrator scans for this to decide when to surface the "Continuă faza"
# button. Convention pinned in `prompts/planning_agent.md`.
PHASE_READY_MARKER = "PHASE_STATUS: ready_to_advance"
PHASE_NEEDS_INPUT_MARKER = "PHASE_STATUS: needs_input"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Disk state — sessions/planning.json
|
||||
# Schema:
|
||||
# {
|
||||
# "<adapter>:<channel_id>": {
|
||||
# "slug": "...",
|
||||
# "description": "...",
|
||||
# "phase": "/office-hours" | "/plan-ceo-review" | ...,
|
||||
# "phases_completed": ["/office-hours", ...],
|
||||
# "session_id": "<claude session uuid>",
|
||||
# "planning_session_id": "<echo internal uuid>",
|
||||
# "started_at": "...",
|
||||
# "updated_at": "...",
|
||||
# "last_text_excerpt": "...", # 500 char excerpt for debugging
|
||||
# "last_subtype": "success" | "error_max_turns" | ...,
|
||||
# }
|
||||
# }
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _channel_key(adapter: str, channel_id: str) -> str:
|
||||
return f"{adapter}:{channel_id}"
|
||||
|
||||
|
||||
def _load_planning_state() -> dict:
    """Load planning sessions from disk. Returns {} if missing or empty.

    A corrupt (non-JSON) file is also treated as empty rather than fatal.
    """
    try:
        raw = PLANNING_STATE_FILE.read_text(encoding="utf-8")
    except FileNotFoundError:
        return {}
    if not raw.strip():
        return {}
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return {}
|
||||
|
||||
|
||||
def _save_planning_state(data: dict) -> None:
    """Atomically write planning sessions via tempfile + os.replace.

    The temp file is created inside SESSIONS_DIR so that os.replace is a
    same-filesystem rename (rename is only atomic within one filesystem).
    """
    SESSIONS_DIR.mkdir(parents=True, exist_ok=True)
    fd, tmp_path = tempfile.mkstemp(
        dir=SESSIONS_DIR, prefix=".planning_", suffix=".json"
    )
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=2, ensure_ascii=False)
            f.write("\n")  # trailing newline keeps the file diff-friendly
        os.replace(tmp_path, PLANNING_STATE_FILE)
    except BaseException:
        # Remove the orphaned temp file on ANY failure (including
        # KeyboardInterrupt/SystemExit), then re-raise the original error.
        try:
            os.unlink(tmp_path)
        except OSError:
            pass
        raise
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# System prompt
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def build_planning_system_prompt(slug: str, description: str, phase: str) -> str:
    """Render `prompts/planning_agent.md` with phase-specific values.

    Returns empty string if the prompt file does not exist (skill-only mode).
    """
    if not PLANNING_PROMPT_FILE.exists():
        logger.warning(
            "Planning prompt missing: %s — falling back to skill-only mode.",
            PLANNING_PROMPT_FILE,
        )
        return ""
    rendered = PLANNING_PROMPT_FILE.read_text(encoding="utf-8")
    # Plain str.replace (NOT str.format) — the markdown carries literal `{}`
    # inside code blocks, which would make `.format()` blow up.
    for placeholder, value in (
        ("{slug}", slug),
        ("{description}", description),
        ("{phase}", phase),
    ):
        rendered = rendered.replace(placeholder, value)
    return rendered
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# PlanningSession class
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class PlanningSession:
    """One Claude CLI subprocess scoped to a planning phase.

    Lifecycle:
    1. ``PlanningSession.start(slug, description, phase, channel, adapter)``
       — fresh subprocess; first prompt is the skill invocation.
    2. ``session.respond(message)`` — `claude --resume <session_id>`
       per user reply. Returns response text + retry hint.
    3. ``session.is_phase_ready()`` — True when output contains
       ``PHASE_STATUS: ready_to_advance`` (orchestrator advances).
    4. State persisted in `sessions/planning.json` so restart is recoverable.
    """

    def __init__(
        self,
        slug: str,
        description: str,
        phase: str,
        channel_id: str,
        adapter: str = "echo",
        session_id: str | None = None,
        planning_session_id: str | None = None,
    ):
        # Project identity and the skill phase this subprocess is scoped to.
        self.slug = slug
        self.description = description
        self.phase = phase
        # (adapter, channel_id) is also the persistence key in planning.json.
        self.channel_id = channel_id
        self.adapter = adapter
        # Claude CLI session uuid; None until the first invocation returns.
        self.session_id = session_id
        # Echo-internal id; generated fresh unless restored from disk.
        self.planning_session_id = planning_session_id or str(uuid.uuid4())
        # Snapshot of the most recent subprocess result, for introspection
        # and for the 500-char excerpt persisted to disk.
        self._last_response: str = ""
        self._last_subtype: str = ""
        self._last_is_error: bool = False

    # -- working directory & --add-dir scoping ------------------------------

    @property
    def cwd(self) -> Path:
        """Working directory for the subprocess.

        Uses `~/workspace/<slug>/` if it exists; otherwise falls back to
        Echo Core repo root (test mode / pre-clone scenarios).
        """
        target = WORKSPACE_ROOT / self.slug
        if target.is_dir():
            return target
        return PROJECT_ROOT

    def _add_dirs(self) -> list[str]:
        """Build `--add-dir` arguments. Skip dirs that don't exist."""
        candidates = [
            SKILLS_ROOT,
            GSTACK_PROJECTS_ROOT / self.slug,
            GSTACK_PROJECTS_ROOT,  # fallback in case slug-specific dir missing
        ]
        seen: set[str] = set()
        flags: list[str] = []
        for d in candidates:
            # De-duplicate by string path so the same dir is never passed twice.
            if d.exists() and str(d) not in seen:
                flags.extend(["--add-dir", str(d)])
                seen.add(str(d))
        return flags

    # -- command construction ----------------------------------------------

    def _build_cmd(
        self,
        prompt: str,
        *,
        resume: str | None,
        max_turns: int,
        with_system_prompt: bool,
    ) -> list[str]:
        """Assemble the Claude CLI argv for one invocation.

        *resume* — pass an existing session id to continue a conversation.
        *with_system_prompt* — only the FIRST invocation of a phase sends the
        rendered system prompt; resumed turns already carry it in-session.
        """
        cmd = [CLAUDE_BIN, "-p", prompt]
        if resume:
            cmd += ["--resume", resume]
        cmd += [
            "--output-format", "stream-json",
            "--verbose",
            "--max-turns", str(max_turns),
        ]
        if with_system_prompt:
            sys_prompt = build_planning_system_prompt(
                self.slug, self.description, self.phase
            )
            # Empty string means the prompt file is missing (skill-only mode)
            # — omit the flag entirely rather than sending "".
            if sys_prompt:
                cmd += ["--system-prompt", sys_prompt]
        cmd += self._add_dirs()
        # NOTE(review): --dangerously-skip-permissions gives the subprocess
        # unrestricted tool access inside cwd + add-dirs — deliberate here,
        # but confirm this is acceptable for the deployment.
        cmd += ["--dangerously-skip-permissions"]
        return cmd

    # -- subprocess invocation ---------------------------------------------

    def _invoke(
        self,
        prompt: str,
        *,
        resume: str | None,
        timeout: int,
        max_turns: int,
        with_system_prompt: bool,
        on_text: Callable[[str], None] | None,
    ) -> dict:
        """Run one Claude CLI turn and log timing/usage. Returns the raw result dict."""
        cmd = self._build_cmd(
            prompt,
            resume=resume,
            max_turns=max_turns,
            with_system_prompt=with_system_prompt,
        )
        _t0 = time.monotonic()
        result = _run_claude(cmd, timeout=timeout, on_text=on_text, cwd=self.cwd)
        _elapsed = int((time.monotonic() - _t0) * 1000)
        # One structured log line per invocation for the invoke audit log.
        _invoke_log.info(
            "planning slug=%s phase=%s adapter=%s channel=%s duration_ms=%d "
            "tokens_in=%d tokens_out=%d session=%s subtype=%s cost=%.4f",
            self.slug, self.phase, self.adapter, self.channel_id, _elapsed,
            result.get("usage", {}).get("input_tokens", 0),
            result.get("usage", {}).get("output_tokens", 0),
            (result.get("session_id") or "")[:8],
            result.get("subtype", ""),
            float(result.get("total_cost_usd", 0) or 0),
        )
        return result

    # -- public API: start/resume ------------------------------------------

    @classmethod
    def start(
        cls,
        slug: str,
        description: str,
        phase: str,
        channel_id: str,
        adapter: str = "echo",
        timeout: int = DEFAULT_TIMEOUT,
        on_text: Callable[[str], None] | None = None,
    ) -> "PlanningSession":
        """Kick off a new phase subprocess. First prompt is the skill call.

        Returns a `PlanningSession` with `session_id`, `_last_response` set.
        Persists state in `sessions/planning.json` keyed by `(adapter, channel_id)`.
        Retries once with `RETRY_MAX_TURNS` if first run hits `error_max_turns`.
        """
        session = cls(
            slug=slug,
            description=description,
            phase=phase,
            channel_id=channel_id,
            adapter=adapter,
        )
        # Compose initial prompt — skill name + slug + description so the skill
        # has enough hook to start.
        initial_prompt = f"{phase} {description}".strip()

        result = session._invoke(
            initial_prompt,
            resume=None,
            timeout=timeout,
            max_turns=DEFAULT_MAX_TURNS,
            with_system_prompt=True,
            on_text=on_text,
        )
        # Retry on error_max_turns — spike found this happens with deep tool-use.
        # Only retry when no session_id came back, i.e. nothing usable was
        # created that we could resume instead.
        if result.get("subtype") == "error_max_turns" and not result.get("session_id"):
            logger.warning(
                "planning start hit error_max_turns for %s/%s — retrying with %d turns",
                slug, phase, RETRY_MAX_TURNS,
            )
            result = session._invoke(
                initial_prompt,
                resume=None,
                timeout=timeout,
                max_turns=RETRY_MAX_TURNS,
                with_system_prompt=True,
                on_text=on_text,
            )

        session.session_id = result.get("session_id") or None
        session._last_response = result.get("result", "")
        session._last_subtype = result.get("subtype", "")
        session._last_is_error = bool(result.get("is_error", False))
        session._persist(action="start", cost_usd=float(result.get("total_cost_usd", 0) or 0))
        return session

    def respond(
        self,
        message: str,
        *,
        timeout: int = DEFAULT_TIMEOUT,
        on_text: Callable[[str], None] | None = None,
    ) -> str:
        """Send the user's reply to the running phase session via `--resume`.

        Returns the response text. Updates persistent state.

        Raises:
            RuntimeError: if called before `start()` produced a session_id.
        """
        if not self.session_id:
            raise RuntimeError(
                "PlanningSession.respond called without an active session_id"
            )
        # Wrap user text in explicit markers so the agent treats it as data,
        # not as instructions (prompt-injection hygiene).
        wrapped = f"[EXTERNAL CONTENT]\n{message}\n[END EXTERNAL CONTENT]"
        result = self._invoke(
            wrapped,
            resume=self.session_id,
            timeout=timeout,
            max_turns=DEFAULT_MAX_TURNS,
            with_system_prompt=False,  # already in session
            on_text=on_text,
        )
        # Retry once on error_max_turns
        if result.get("subtype") == "error_max_turns":
            logger.warning(
                "planning respond hit error_max_turns for %s/%s — retrying",
                self.slug, self.phase,
            )
            result = self._invoke(
                wrapped,
                resume=self.session_id,
                timeout=timeout,
                max_turns=RETRY_MAX_TURNS,
                with_system_prompt=False,
                on_text=on_text,
            )

        self._last_response = result.get("result", "")
        self._last_subtype = result.get("subtype", "")
        self._last_is_error = bool(result.get("is_error", False))
        self._persist(
            action="respond", cost_usd=float(result.get("total_cost_usd", 0) or 0)
        )
        return self._last_response

    # -- introspection ------------------------------------------------------

    def is_phase_ready(self) -> bool:
        """True if last response contained the ready-to-advance marker."""
        return PHASE_READY_MARKER in (self._last_response or "")

    @property
    def last_response(self) -> str:
        """Full text of the most recent subprocess response ("" before first run)."""
        return self._last_response

    @property
    def last_subtype(self) -> str:
        """Result subtype of the most recent run, e.g. "success" or "error_max_turns"."""
        return self._last_subtype

    # -- persistence --------------------------------------------------------

    def _persist(self, *, action: str, cost_usd: float = 0.0) -> None:
        """Write this session's slot into sessions/planning.json.

        Accumulates `total_cost_usd` across turns; preserves `started_at` and
        `phases_completed` from any existing entry.

        NOTE(review): the `action` parameter is currently unused in the body —
        presumably intended for logging/auditing; confirm or drop at call sites.
        """
        data = _load_planning_state()
        key = _channel_key(self.adapter, self.channel_id)
        existing = data.get(key, {})
        now = datetime.now(timezone.utc).isoformat()

        phases_completed = existing.get("phases_completed", [])
        # If this session changed phase, the orchestrator handles transition;
        # we just keep our own slot consistent with the current phase.
        entry = {
            "slug": self.slug,
            "description": self.description,
            "phase": self.phase,
            "phases_completed": phases_completed,
            "session_id": self.session_id,
            "planning_session_id": self.planning_session_id,
            "adapter": self.adapter,
            "channel_id": self.channel_id,
            "started_at": existing.get("started_at", now),
            "updated_at": now,
            "last_text_excerpt": (self._last_response or "")[:500],
            "last_subtype": self._last_subtype,
            "total_cost_usd": (
                float(existing.get("total_cost_usd") or 0.0) + float(cost_usd or 0.0)
            ),
        }
        data[key] = entry
        _save_planning_state(data)

    @classmethod
    def from_state(cls, adapter: str, channel_id: str) -> "PlanningSession | None":
        """Reconstruct a session from `sessions/planning.json` (post-restart).

        Returns None when there is no entry for the channel or the stored
        entry has no Claude session_id to resume.
        """
        data = _load_planning_state()
        entry = data.get(_channel_key(adapter, channel_id))
        if not entry or not entry.get("session_id"):
            return None
        sess = cls(
            slug=entry["slug"],
            description=entry.get("description", ""),
            phase=entry["phase"],
            channel_id=channel_id,
            adapter=adapter,
            session_id=entry["session_id"],
            planning_session_id=entry.get("planning_session_id"),
        )
        # Restore only the introspection snapshot; the excerpt is truncated
        # to 500 chars on disk, so `last_response` may be partial here.
        sess._last_subtype = entry.get("last_subtype", "")
        sess._last_response = entry.get("last_text_excerpt", "")
        return sess
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Module-level helpers consumed by router/orchestrator/adapters
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def get_planning_state(adapter: str, channel_id: str) -> dict | None:
    """Return persisted planning state for a channel, or None."""
    state = _load_planning_state()
    return state.get(_channel_key(adapter, channel_id))
|
||||
|
||||
|
||||
def is_in_planning(adapter: str, channel_id: str) -> bool:
    """True if there is an active planning session for this channel."""
    state = get_planning_state(adapter, channel_id)
    return state is not None
|
||||
|
||||
|
||||
def clear_planning_state(adapter: str, channel_id: str) -> bool:
    """Drop persisted planning state. Returns True if anything was cleared."""
    key = _channel_key(adapter, channel_id)
    data = _load_planning_state()
    if key not in data:
        return False
    data.pop(key)
    _save_planning_state(data)
    return True
|
||||
|
||||
|
||||
def list_planning_sessions() -> dict:
    """Return all persisted planning sessions (for diagnostics)."""
    sessions = _load_planning_state()
    return sessions
|
||||
128
src/ralph_flow.py
Normal file
128
src/ralph_flow.py
Normal file
@@ -0,0 +1,128 @@
|
||||
"""Ralph UX flow state — short-lived per (adapter, chat, user) state for interactive menus.
|
||||
|
||||
Tracks state for multi-step flows like "user clicked Propose → next message is description".
|
||||
Persisted in sessions/ralph_flow.json so it survives Echo Core restart.
|
||||
TTL: 10 min default; cleanup_expired() drops stale entries.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import tempfile
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from pathlib import Path
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Repo root is two levels up from src/ralph_flow.py.
PROJECT_ROOT = Path(__file__).resolve().parent.parent
SESSIONS_DIR = PROJECT_ROOT / "sessions"
_STATE_FILE = SESSIONS_DIR / "ralph_flow.json"

DEFAULT_TTL_SECONDS = 600  # 10 minutes

# Step values used across adapters
STEP_INPUT_DESCRIPTION = "input_description"
STEP_IN_PLANNING = "in_planning"  # reserved for W2 (planning agent)
|
||||
|
||||
|
||||
def _key(adapter: str, chat_id: str, user_id: str) -> str:
|
||||
return f"{adapter}:{chat_id}:{user_id}"
|
||||
|
||||
|
||||
def _load() -> dict:
    """Read the flow-state file; {} when missing, blank, or corrupt JSON."""
    try:
        raw = _STATE_FILE.read_text(encoding="utf-8")
    except FileNotFoundError:
        return {}
    if not raw.strip():
        return {}
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return {}
|
||||
|
||||
|
||||
def _save(data: dict) -> None:
    """Atomically persist flow state via tempfile + os.replace.

    The temp file lives in SESSIONS_DIR so os.replace is a same-filesystem
    rename (only atomic within one filesystem).
    """
    SESSIONS_DIR.mkdir(parents=True, exist_ok=True)
    fd, tmp_path = tempfile.mkstemp(
        dir=SESSIONS_DIR, prefix=".ralph_flow_", suffix=".json"
    )
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=2, ensure_ascii=False)
            f.write("\n")  # trailing newline keeps the file diff-friendly
        os.replace(tmp_path, _STATE_FILE)
    except BaseException:
        # Remove the orphaned temp file on ANY failure (including
        # KeyboardInterrupt), then re-raise.
        try:
            os.unlink(tmp_path)
        except OSError:
            pass
        raise
|
||||
|
||||
|
||||
def _is_expired(entry: dict, now: datetime | None = None) -> bool:
|
||||
expires_at = entry.get("expires_at")
|
||||
if not expires_at:
|
||||
return False
|
||||
try:
|
||||
return datetime.fromisoformat(expires_at) < (now or datetime.now(timezone.utc))
|
||||
except ValueError:
|
||||
return True
|
||||
|
||||
|
||||
def get_state(adapter: str, chat_id: str, user_id: str) -> dict | None:
    """Return current state or None if absent/expired. Drops expired entries on read."""
    key = _key(adapter, chat_id, user_id)
    data = _load()
    entry = data.get(key)
    if entry is None:
        return None
    if not _is_expired(entry):
        return entry
    # Lazy cleanup: an expired entry is removed the first time it is read.
    del data[key]
    _save(data)
    return None
|
||||
|
||||
|
||||
def set_state(
    adapter: str,
    chat_id: str,
    user_id: str,
    step: str,
    project: str | None = None,
    ttl_seconds: int = DEFAULT_TTL_SECONDS,
    **extras,
) -> None:
    """Set state for (adapter, chat, user). Overwrites any previous state."""
    deadline = datetime.now(timezone.utc) + timedelta(seconds=ttl_seconds)
    entry: dict = {"step": step, "expires_at": deadline.isoformat()}
    if project is not None:
        entry["project"] = project
    entry.update(extras)
    data = _load()
    data[_key(adapter, chat_id, user_id)] = entry
    _save(data)
|
||||
|
||||
|
||||
def clear_state(adapter: str, chat_id: str, user_id: str) -> bool:
    """Clear state. Returns True if anything was cleared."""
    key = _key(adapter, chat_id, user_id)
    data = _load()
    if key not in data:
        return False
    data.pop(key)
    _save(data)
    return True
|
||||
|
||||
|
||||
def cleanup_expired() -> int:
    """Drop all expired entries. Returns count dropped."""
    data = _load()
    now = datetime.now(timezone.utc)
    stale = [key for key, entry in data.items() if _is_expired(entry, now=now)]
    for key in stale:
        del data[key]
    # Only touch the disk when something actually changed.
    if stale:
        _save(data)
    return len(stale)
|
||||
244
src/router.py
244
src/router.py
@@ -18,6 +18,12 @@ from src.claude_session import (
|
||||
set_session_model,
|
||||
VALID_MODELS,
|
||||
)
|
||||
from src.planning_orchestrator import PlanningOrchestrator
|
||||
from src.planning_session import (
|
||||
clear_planning_state,
|
||||
get_planning_state,
|
||||
is_in_planning,
|
||||
)
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@@ -41,6 +47,7 @@ def route_message(
|
||||
text: str,
|
||||
model: str | None = None,
|
||||
on_text: Callable[[str], None] | None = None,
|
||||
adapter_name: str | None = None,
|
||||
) -> tuple[str, bool]:
|
||||
"""Route an incoming message. Returns (response_text, is_command).
|
||||
|
||||
@@ -49,11 +56,65 @@ def route_message(
|
||||
|
||||
*on_text* — optional callback invoked with each intermediate text block
|
||||
from Claude, enabling real-time streaming to the adapter.
|
||||
|
||||
*adapter_name* — "discord" / "telegram" / "whatsapp" / None. Used for
|
||||
adapter-specific response shaping (e.g., redirect line on WhatsApp).
|
||||
"""
|
||||
text = text.strip()
|
||||
|
||||
# ---- Planning state-aware routing -----------------------------------
|
||||
# If the channel is in an active planning session, the user's message is
|
||||
# part of that conversation — route it to the orchestrator (NOT Claude
|
||||
# main session, NOT slash commands except explicit /cancel and /advance).
|
||||
in_planning = is_in_planning(adapter_name or "echo", channel_id)
|
||||
if in_planning:
|
||||
low = text.lower().strip()
|
||||
if low in ("/cancel", "/anuleaza", "/anulează", "anulează planning", "anuleaza planning"):
|
||||
# Capture slug BEFORE clearing state so we can revert approved-tasks status.
|
||||
adapter_key = adapter_name or "echo"
|
||||
state_snapshot = get_planning_state(adapter_key, channel_id)
|
||||
cleared = PlanningOrchestrator.cancel(adapter_key, channel_id)
|
||||
if state_snapshot and state_snapshot.get("slug"):
|
||||
_revert_status_for_slug(state_snapshot["slug"], to="pending")
|
||||
if cleared:
|
||||
return "Planning anulat. Status revenit la pending.", True
|
||||
return "Nu era nicio sesiune activă.", True
|
||||
if low in ("/advance", "/continua", "/continuă", "continuă faza", "continua faza"):
|
||||
session, response, completed = PlanningOrchestrator.advance(
|
||||
adapter_name or "echo", channel_id, on_text=on_text,
|
||||
)
|
||||
return response, True
|
||||
if low in ("/finalize", "/dau drumul", "dau drumul"):
|
||||
return _approve_from_planning(channel_id, adapter_name or "echo"), True
|
||||
if text.startswith("/"):
|
||||
# Allow other commands to fall through (e.g. /status, /clear),
|
||||
# but skip Ralph dispatch and Claude routing below.
|
||||
pass
|
||||
else:
|
||||
# Plain message → planning conversation.
|
||||
try:
|
||||
session, response, phase_ready = PlanningOrchestrator.respond(
|
||||
adapter_name or "echo", channel_id, text, on_text=on_text,
|
||||
)
|
||||
if session is None:
|
||||
# State raced — drop planning marker, fall through.
|
||||
log.warning(
|
||||
"planning state vanished mid-respond for channel=%s", channel_id
|
||||
)
|
||||
else:
|
||||
if phase_ready:
|
||||
response = (
|
||||
response
|
||||
+ "\n\n— Apasă **Continuă faza** ca să trec la următoarea, "
|
||||
"sau **Anulează** dacă te-ai răzgândit."
|
||||
)
|
||||
return response, False
|
||||
except Exception as e:
|
||||
log.error("Planning respond failed for %s: %s", channel_id, e)
|
||||
return f"Planning blocat: {e}", False
|
||||
|
||||
# Ralph commands — short form (/p /a /l /k) and legacy aliases (!propose !approve !status !stop)
|
||||
ralph_response = _try_ralph_dispatch(text)
|
||||
ralph_response = _try_ralph_dispatch(text, adapter_name=adapter_name)
|
||||
if ralph_response is not None:
|
||||
return ralph_response, True
|
||||
|
||||
@@ -168,7 +229,19 @@ RALPH_CMDS = {
|
||||
}
|
||||
|
||||
|
||||
def _try_ralph_dispatch(text: str) -> str | None:
|
||||
_WHATSAPP_REDIRECT = (
|
||||
"\n\n💡 Pentru meniu interactiv folosește Discord sau Telegram."
|
||||
)
|
||||
|
||||
|
||||
def _maybe_whatsapp_redirect(text: str, adapter_name: str | None) -> str:
|
||||
"""Append a redirect hint for WhatsApp users so they discover the rich UX."""
|
||||
if adapter_name == "whatsapp":
|
||||
return text + _WHATSAPP_REDIRECT
|
||||
return text
|
||||
|
||||
|
||||
def _try_ralph_dispatch(text: str, adapter_name: str | None = None) -> str | None:
|
||||
"""Parse and dispatch Ralph commands. Returns response string or None if no match."""
|
||||
low = text.lower()
|
||||
first = low.split(None, 1)[0] if low else ""
|
||||
@@ -176,7 +249,10 @@ def _try_ralph_dispatch(text: str) -> str | None:
|
||||
if first in ("/p", "!propose"):
|
||||
parts = text.split(None, 2)
|
||||
if len(parts) < 3:
|
||||
return "Folosire: /p <slug> <descriere>\nEx: /p roa2web Homepage redesign cu hero section"
|
||||
return _maybe_whatsapp_redirect(
|
||||
"Folosire: /p <slug> <descriere>\nEx: /p roa2web Homepage redesign cu hero section",
|
||||
adapter_name,
|
||||
)
|
||||
return _ralph_propose(parts[1].strip(), parts[2].strip())
|
||||
|
||||
if first in ("/a", "!approve"):
|
||||
@@ -189,7 +265,7 @@ def _try_ralph_dispatch(text: str) -> str | None:
|
||||
if first in ("/l", "!status"):
|
||||
parts = text.split(None, 1)
|
||||
filter_slug = parts[1].strip().lower() if len(parts) > 1 else None
|
||||
return _ralph_status(filter_slug)
|
||||
return _maybe_whatsapp_redirect(_ralph_status(filter_slug), adapter_name)
|
||||
|
||||
if first in ("/k", "!stop"):
|
||||
parts = text.split(None, 1)
|
||||
@@ -201,7 +277,11 @@ def _try_ralph_dispatch(text: str) -> str | None:
|
||||
|
||||
|
||||
def _ralph_propose(slug: str, description: str) -> str:
|
||||
"""Adaugă un proiect cu status pending în approved-tasks.json."""
|
||||
"""Adaugă un proiect cu status pending în approved-tasks.json.
|
||||
|
||||
Schema includes the W2 planning fields (`planning_session_id`,
|
||||
`final_plan_path`) so the orchestrator and PRD generator can find them.
|
||||
"""
|
||||
data = _load_approved_tasks()
|
||||
|
||||
for p in data["projects"]:
|
||||
@@ -212,6 +292,8 @@ def _ralph_propose(slug: str, description: str) -> str:
|
||||
"name": slug,
|
||||
"description": description,
|
||||
"status": "pending",
|
||||
"planning_session_id": None,
|
||||
"final_plan_path": None,
|
||||
"proposed_at": datetime.now(timezone.utc).isoformat(),
|
||||
"approved_at": None,
|
||||
"started_at": None,
|
||||
@@ -352,3 +434,155 @@ def _get_channel_config(channel_id: str) -> dict | None:
|
||||
if ch.get("id") == channel_id:
|
||||
return ch
|
||||
return None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Planning session entry points (W2)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def start_planning_session(
    slug: str,
    description: str,
    channel_id: str,
    adapter_name: str,
    on_text: Callable[[str], None] | None = None,
) -> str:
    """Begin a conversational planning session for `slug` on this channel.

    Updates approved-tasks.json: status `planning`, `planning_session_id` set.
    Returns the first response text from the planning agent — the adapter
    will display it and the user replies in the same channel.

    On orchestrator failure the tasks file is NOT saved, so a freshly
    appended entry is discarded along with the failed start.
    """
    data = _load_approved_tasks()

    # Locate or create the project entry.
    entry = None
    for p in data["projects"]:
        # Slug match is case-insensitive, same as the other helpers here.
        if p["name"].lower() == slug.lower():
            entry = p
            break
    if entry is None:
        entry = {
            "name": slug,
            "description": description,
            "status": "pending",
            "planning_session_id": None,
            "final_plan_path": None,
            "proposed_at": datetime.now(timezone.utc).isoformat(),
            "approved_at": None,
            "started_at": None,
            "pid": None,
        }
        data["projects"].append(entry)

    # Kick off orchestrator (this can take ~60s on first turn — caller should
    # have already shown a "Echo se gândește..." indicator).
    try:
        session, first_response = PlanningOrchestrator.start(
            slug=slug,
            description=description,
            channel_id=channel_id,
            adapter=adapter_name or "echo",
            on_text=on_text,
        )
    except Exception as e:
        # Surface the failure to the user; `data` is deliberately not saved.
        log.error("Planning session start failed for %s: %s", slug, e)
        return f"Planning blocat: {e}\n\nÎncearcă din nou cu /plan {slug} <descriere>."

    entry["status"] = "planning"
    entry["planning_session_id"] = session.planning_session_id
    # Backfill the description only if the existing entry had none.
    if not entry.get("description"):
        entry["description"] = description
    _save_approved_tasks(data)
    return first_response
|
||||
|
||||
|
||||
def _revert_status_for_slug(slug: str, to: str = "pending") -> None:
    """Revert a project's status (planning → `to`) given its slug.

    Only touches a project currently in `planning`; saves only on change.
    """
    if not slug:
        return
    wanted = slug.lower()
    data = _load_approved_tasks()
    for project in data["projects"]:
        if project["name"].lower() == wanted and project.get("status") == "planning":
            project["status"] = to
            project["planning_session_id"] = None
            _save_approved_tasks(data)
            return
|
||||
|
||||
|
||||
def _approve_from_planning(channel_id: str, adapter_name: str) -> str:
    """User clicked 'Dau drumul' inside an active planning session.

    Promotes status `planning` → `approved` and clears planning state.
    Returns confirmation text.
    """
    state = get_planning_state(adapter_name, channel_id)
    if not state:
        return "Nu există o sesiune de planning activă."
    slug = state.get("slug")
    if not slug:
        return "Sesiunea de planning nu are slug — anulează cu /cancel și ia-o de la capăt."

    data = _load_approved_tasks()
    # Prefer the path recorded in state; otherwise derive the conventional one.
    final_plan_path = state.get("final_plan_path") or str(
        PlanningOrchestrator.final_plan_path(slug)
    )
    found = False
    for p in data["projects"]:
        if p["name"].lower() == slug.lower():
            p["status"] = "approved"
            p["approved_at"] = datetime.now(timezone.utc).isoformat()
            p["planning_session_id"] = None
            p["final_plan_path"] = final_plan_path
            found = True
            break
    if not found:
        # State points at a project that no longer exists — nothing saved.
        return f"Proiectul `{slug}` lipsește din approved-tasks.json. Anulează cu /cancel."
    _save_approved_tasks(data)
    # Only drop the planning state after the promotion is safely on disk.
    clear_planning_state(adapter_name, channel_id)
    return (
        f"✅ Aprobat: `{slug}`. Ralph începe la 23:00.\n"
        f"   Plan: `{final_plan_path}`"
    )
|
||||
|
||||
|
||||
# Public helpers — re-exported for adapter wiring.
|
||||
def planning_state_for(channel_id: str, adapter_name: str) -> dict | None:
    """Return current planning state for (adapter, channel) — adapter helper."""
    state = get_planning_state(adapter_name, channel_id)
    return state
|
||||
|
||||
|
||||
def planning_advance(
    channel_id: str,
    adapter_name: str,
    on_text: Callable[[str], None] | None = None,
) -> tuple[str, bool]:
    """Advance the planning pipeline by one phase.

    Returns (response_text, completed_bool); the session object from the
    orchestrator is not needed by adapters and is discarded here.
    """
    outcome = PlanningOrchestrator.advance(adapter_name, channel_id, on_text=on_text)
    _session, text, completed = outcome
    return text, completed
|
||||
|
||||
|
||||
def planning_cancel(channel_id: str, adapter_name: str) -> str:
    """Abort the active planning session and revert the project to pending.

    Returns a short confirmation string (Romanian) for the chat adapter.
    """
    active = get_planning_state(adapter_name, channel_id)
    if not active:
        # Nothing to cancel on this channel.
        return "Nu era nicio sesiune de planning activă."
    project_slug = active.get("slug")
    PlanningOrchestrator.cancel(adapter_name, channel_id)
    if project_slug:
        # Only revert status when the session was tied to a known project.
        _revert_status_for_slug(project_slug, to="pending")
    return "Planning anulat. Status revenit la pending."
|
||||
|
||||
|
||||
def planning_approve(channel_id: str, adapter_name: str) -> str:
    """Promote a planning session to approved (the 'Dau drumul' button).

    Thin wrapper kept as a stable public name for adapter wiring.
    """
    confirmation = _approve_from_planning(channel_id, adapter_name)
    return confirmation
|
||||
|
||||
61
tasks/spike-planning-findings.md
Normal file
61
tasks/spike-planning-findings.md
Normal file
@@ -0,0 +1,61 @@
|
||||
# Spike Step 0 — Planning Subprocess Feasibility
|
||||
|
||||
**Date:** 2026-04-26
|
||||
**Plan:** `/home/moltbot/.claude/plans/home-moltbot-claude-plans-vreau-ca-come-snoopy-spark.md`
|
||||
**Verdict:** ✅ **PASS** — proceed with W1 + W2 + W3 as planned
|
||||
|
||||
---
|
||||
|
||||
## Pass criteria check
|
||||
|
||||
| # | Criteriu plan | Rezultat | Detalii |
|
||||
|---|---------------|----------|---------|
|
||||
| 1 | Subprocess termină în <60s/turn | ✅ | 49s pentru turn cu prompt simplu, 82s pentru resume. |
|
||||
| 2 | Output text readable (AskUserQuestion → text) | ✅ | Skill-ul detectează `-p` mode și serializează întrebarea în `result` text natural, NU blochează cu tool. |
|
||||
| 3 | Session ID returnat și reusabil cu `--resume` | ✅ | Confirmat round-trip; context preservat între turn-uri. |
|
||||
| 4 | Artifact pe disk apare în locația așteptată | ⚠️ N/A pentru spike | Artifactul se creează la sfârșitul fazei (multi-turn). Spike-ul a verificat doar primele 2 turn-uri; comportamentul de la sfârșit rămâne de validat în W2 implementation. |
|
||||
|
||||
---
|
||||
|
||||
## Test runs (raw)
|
||||
|
||||
| Run | Prompt | --max-turns | Result | Cost | Notes |
|
||||
|-----|--------|-------------|--------|------|-------|
|
||||
| 1 | `/office-hours Vreau sa adaug filtru genuri pe game-library` | 1 | `error_max_turns` (stop=tool_use) | $0.56 | Skill a iterat tool_use; nu a apucat să termine. |
|
||||
| 2 | (același) | 8 | `error_max_turns` (stop=tool_use, num_turns=9) | $0.68 | Skill a făcut deep context-gathering (Bash×N pentru git log/grep) fără a ajunge la întrebare în turn budget. |
|
||||
| 3 | `/office-hours Test feasibility minimal` | 5 | ✅ `completed` (end_turn) în 49s | $0.55 | Skill a detectat ambiguitate, a output-at întrebarea ca **text** în `result`, exit clean. |
|
||||
| 4 | `--resume 175bb0c3...` cu reply substantive | 6 | ✅ `completed` (end_turn) în 82s | $1.11 | Skill a continuat natural, a output structured premises + clarification questions. |
|
||||
|
||||
---
|
||||
|
||||
## Concluzii cheie
|
||||
|
||||
### Validate
|
||||
- **Pattern subprocess funcționează**: Echo poate `claude -p '/skill'` + `claude --resume <id> -p '<user reply>'` într-un loop cu user-in-the-loop.
|
||||
- **AskUserQuestion convertit la text**: skill-ul gstack detectează single-shot mode și serializează întrebările în output text. Eng review's concern (#1, #2) **refutat empiric**.
|
||||
- **Multi-language support**: skill răspunde în limba prompt-ului (ro detectat corect).
|
||||
|
||||
### Caveate și ajustări față de plan
|
||||
- **Turn budget critical**: prompts complexe (cu context concret de proiect) declanșează deep tool-use chains (read CLAUDE.md, grep, git log etc.) și pot eșua cu max-turns mic.
|
||||
- **Recomandare W2**: `--max-turns 15-20` per invocare; detect `terminal_reason != "completed"` → fallback strategy (retry cu mesaj mai concis, sau abort cu mesaj util către user).
|
||||
- **Cost per turn**: $0.5-1.1 pe runde Opus 4.7 1M context. Pentru full planning (4 faze × ~5 runde = 20 turns) ar putea fi $10-22 per proiect. Marius pe subscription deci e doar rate-limit pressure, nu USD direct.
|
||||
- **Recomandare W2**: monitoring rate limits; instrumentation usage `total_cost_usd` aggregat per planning session.
|
||||
- **Hot-restart limitation acceptat**: dacă echo-core restart mid-planning, sesiunea Claude rămâne pe disk (resume funcționează), DAR child subprocess (dacă e long-running) se pierde. Re-resume e necesar la repornire.
|
||||
|
||||
### Riscuri rămase pentru W2
|
||||
1. **Skills înlănțuite** (ex: `/plan-ceo-review` urmat de `/plan-eng-review` în sesiuni separate, citind disk artifacts) — netestat în spike. Risc mediu — știm că skill-uri individuale merg.
|
||||
2. **`--add-dir` cu paths multiple** pentru a expune ~/workspace/<slug> + ~/.gstack/projects/ + ~/.claude/skills/ — netestat. Risc mic, doar configurație.
|
||||
3. **Stream-json input pentru pipe stdin response** — netestat; alternativa naivă (concat în prompt) merge.
|
||||
|
||||
---
|
||||
|
||||
## Decision
|
||||
|
||||
**Proceed with W1 → W2 → W3 sequence per plan.** Spike-ul confirmă fezabilitatea fundamentală a Partea C. Riscurile rămase sunt incrementale, nu blocaj arhitectural.
|
||||
|
||||
W2 trebuie să implementeze:
|
||||
- `PlanningOrchestrator` cu `--max-turns 20` default și retry strategy pe `error_max_turns`
|
||||
- Telemetry pe `total_cost_usd` în `result` JSON pentru rate limit monitoring
|
||||
- Re-resume logic la restart echo-core (citește `sessions/planning.json`, continuă cu `--resume <stored_id>`)
|
||||
|
||||
**Total cost spike**: ~$2.88 (subscription rate-limit consumption, nu USD direct).
|
||||
250
tests/test_dag_execution.py
Normal file
250
tests/test_dag_execution.py
Normal file
@@ -0,0 +1,250 @@
|
||||
"""Tests for W3 DAG execution + retry guard.
|
||||
|
||||
Acoperă:
|
||||
- topological_eligible: alegere story DAG-aware (passes/failed/blocked propagation)
|
||||
- cmd_incr_retry: 3-retry guard cu auto-fail la max_retries
|
||||
- cmd_mark_failed: propagare blocked la dependenți
|
||||
- _normalize_story: validează schema extinsă (tags, dependsOn, retries, blocked)
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
PROJECT_ROOT = Path(__file__).resolve().parents[1]
|
||||
if str(PROJECT_ROOT) not in sys.path:
|
||||
sys.path.insert(0, str(PROJECT_ROOT))
|
||||
|
||||
from tools import ralph_dag # noqa: E402
|
||||
from tools.ralph_prd_generator import _normalize_story # noqa: E402
|
||||
|
||||
|
||||
# ── topological_eligible ───────────────────────────────────────
|
||||
|
||||
|
||||
def _stories(*specs):
|
||||
"""Helper: build minimal stories list. Each spec = (id, priority, dependsOn, flags...)"""
|
||||
out = []
|
||||
for spec in specs:
|
||||
sid, prio, deps, *rest = spec
|
||||
s = {
|
||||
"id": sid,
|
||||
"priority": prio,
|
||||
"dependsOn": list(deps),
|
||||
"passes": False,
|
||||
"failed": False,
|
||||
"blocked": False,
|
||||
"retries": 0,
|
||||
}
|
||||
for flag in rest:
|
||||
s[flag] = True
|
||||
out.append(s)
|
||||
return out
|
||||
|
||||
|
||||
class TestTopologicalEligible:
    """Story selection: lowest-priority eligible story, honoring the dependency DAG."""

    def test_no_deps_lowest_priority_picked(self):
        # With no dependencies, the numerically lowest priority wins.
        stories = _stories(
            ("US-002", 20, []),
            ("US-001", 10, []),
        )
        chosen = ralph_dag.topological_eligible(stories)
        assert chosen["id"] == "US-001"

    def test_dependent_skipped_until_dep_passes(self):
        stories = _stories(
            ("US-001", 10, []),
            ("US-002", 20, ["US-001"]),
        )
        # US-001 not done yet → US-001 picked
        assert ralph_dag.topological_eligible(stories)["id"] == "US-001"

        # Mark US-001 passes → US-002 eligible
        stories[0]["passes"] = True
        assert ralph_dag.topological_eligible(stories)["id"] == "US-002"

    def test_failed_dep_propagates_blocked(self):
        # A failed dependency must mark its dependents blocked (mutates in place).
        stories = _stories(
            ("US-001", 10, []),
            ("US-002", 20, ["US-001"]),
        )
        stories[0]["failed"] = True
        chosen = ralph_dag.topological_eligible(stories)
        assert chosen is None  # US-002 is marked blocked, nothing eligible
        assert stories[1]["blocked"] is True
        assert stories[1]["failureReason"] == "blocked_by:US-001"

    def test_independent_runs_when_other_chain_failed(self):
        # US-001 failed → US-002 blocked, but US-003 is independent → eligible
        stories = _stories(
            ("US-001", 10, []),
            ("US-002", 20, ["US-001"]),
            ("US-003", 30, []),
        )
        stories[0]["failed"] = True
        chosen = ralph_dag.topological_eligible(stories)
        assert chosen["id"] == "US-003"
        assert stories[1]["blocked"] is True

    def test_chain_blocking_propagates_transitively(self):
        # US-001 → US-002 → US-003. US-001 failed → US-002 blocked → US-003 blocked.
        stories = _stories(
            ("US-001", 10, []),
            ("US-002", 20, ["US-001"]),
            ("US-003", 30, ["US-002"]),
        )
        stories[0]["failed"] = True
        chosen = ralph_dag.topological_eligible(stories)
        assert chosen is None
        assert stories[1]["blocked"] is True
        assert stories[2]["blocked"] is True

    def test_all_complete_returns_none(self):
        # Everything passed → nothing left to pick.
        stories = _stories(("US-001", 10, []))
        stories[0]["passes"] = True
        assert ralph_dag.topological_eligible(stories) is None

    def test_already_blocked_story_skipped(self):
        # Pre-blocked stories are never selected, even at lower priority.
        stories = _stories(
            ("US-001", 10, []),
            ("US-002", 20, []),
        )
        stories[0]["blocked"] = True
        chosen = ralph_dag.topological_eligible(stories)
        assert chosen["id"] == "US-002"
|
||||
|
||||
|
||||
# ── cmd_incr_retry / cmd_mark_failed (file-based) ──────────────
|
||||
|
||||
|
||||
@pytest.fixture
def prd_path(tmp_path):
    """Write a minimal two-story prd.json into tmp_path and return its path."""
    def _story(sid, title, prio, deps):
        # One story row with the full W3 field set, all state flags reset.
        return {
            "id": sid, "title": title, "priority": prio,
            "dependsOn": deps, "tags": [], "acceptanceCriteria": ["a"],
            "passes": False, "failed": False, "blocked": False, "retries": 0,
        }

    payload = {
        "projectName": "test-proj",
        "branchName": "ralph/test-proj",
        "userStories": [
            _story("US-001", "first", 10, []),
            _story("US-002", "second", 20, ["US-001"]),
        ],
    }
    target = tmp_path / "prd.json"
    target.write_text(json.dumps(payload), encoding="utf-8")
    return target
|
||||
|
||||
|
||||
class TestRetryGuard:
    """cmd_incr_retry: retry counting plus auto-fail at the 3-retry cap."""

    def test_incr_retry_increments_count(self, prd_path):
        rc = ralph_dag.cmd_incr_retry(prd_path, "US-001")
        assert rc == 0
        data = json.loads(prd_path.read_text())
        assert data["userStories"][0]["retries"] == 1
        assert data["userStories"][0]["failed"] is False

    def test_three_retries_marks_failed_max_retries(self, prd_path):
        # incr 3 times — hitting the cap must flip failed + set the reason
        for _ in range(3):
            ralph_dag.cmd_incr_retry(prd_path, "US-001")
        data = json.loads(prd_path.read_text())
        assert data["userStories"][0]["retries"] == 3
        assert data["userStories"][0]["failed"] is True
        assert data["userStories"][0]["failureReason"] == "max_retries"

    def test_max_retries_propagates_blocked_to_dependent(self, prd_path):
        for _ in range(3):
            ralph_dag.cmd_incr_retry(prd_path, "US-001")
        data = json.loads(prd_path.read_text())
        # US-002 depends on US-001 → blocked
        assert data["userStories"][1]["blocked"] is True

    def test_unknown_story_returns_error(self, prd_path):
        # CLI-style non-zero return code for an unknown story id.
        rc = ralph_dag.cmd_incr_retry(prd_path, "US-999")
        assert rc == 1
|
||||
|
||||
|
||||
class TestMarkFailed:
    """cmd_mark_failed: flags the story and blocks its dependents on disk."""

    def test_mark_failed_sets_flags(self, prd_path):
        rc = ralph_dag.cmd_mark_failed(prd_path, "US-001", "rate_limited")
        assert rc == 0
        data = json.loads(prd_path.read_text())
        assert data["userStories"][0]["failed"] is True
        assert data["userStories"][0]["failureReason"] == "rate_limited"

    def test_mark_failed_propagates_blocked(self, prd_path):
        ralph_dag.cmd_mark_failed(prd_path, "US-001", "rate_limited")
        data = json.loads(prd_path.read_text())
        # Dependent US-002 must be blocked with a traceable reason.
        assert data["userStories"][1]["blocked"] is True
        assert data["userStories"][1]["failureReason"] == "blocked_by:US-001"
|
||||
|
||||
|
||||
class TestNextStory:
    """cmd_next_story: prints the next eligible id, exit code 1 when none."""

    def test_next_story_prints_id(self, prd_path, capsys):
        rc = ralph_dag.cmd_next_story(prd_path)
        assert rc == 0
        captured = capsys.readouterr()
        # The chosen id is written to stdout for shell-script consumption.
        assert captured.out.strip() == "US-001"

    def test_next_story_returns_1_when_none_eligible(self, prd_path, capsys):
        # Mark all complete
        data = json.loads(prd_path.read_text())
        for s in data["userStories"]:
            s["passes"] = True
        prd_path.write_text(json.dumps(data))
        rc = ralph_dag.cmd_next_story(prd_path)
        assert rc == 1
|
||||
|
||||
|
||||
# ── _normalize_story (PRD generator schema) ────────────────────
|
||||
|
||||
|
||||
class TestNormalizeStory:
    """_normalize_story: validates/defaults the extended W3 story schema."""

    def test_default_fields_populated(self):
        s = _normalize_story({"title": "x"}, idx=0)
        # All W3 schema fields must be present
        for key in ("id", "title", "description", "priority", "acceptanceCriteria",
                    "tags", "dependsOn", "passes", "failed", "blocked", "retries",
                    "failureReason", "notes"):
            assert key in s, f"Missing schema field: {key}"
        assert s["passes"] is False
        assert s["failed"] is False
        assert s["blocked"] is False
        assert s["retries"] == 0

    def test_invalid_tags_filtered(self):
        s = _normalize_story({"title": "x", "tags": ["frontend", "ui", "made-up"]}, idx=0)
        assert s["tags"] == ["ui"]  # frontend & made-up are not in VALID_TAGS

    def test_empty_acceptance_gets_default(self):
        # A story without criteria still gets at least one placeholder.
        s = _normalize_story({"title": "x"}, idx=0)
        assert len(s["acceptanceCriteria"]) >= 1

    def test_ui_tag_implies_browser_check(self):
        s = _normalize_story({"title": "x", "tags": ["ui"]}, idx=0)
        assert s["requiresBrowserCheck"] is True

    def test_explicit_browser_check_preserved(self):
        # Caller-provided flag survives even without the "ui" tag.
        s = _normalize_story({"title": "x", "tags": [], "requiresBrowserCheck": True}, idx=0)
        assert s["requiresBrowserCheck"] is True

    def test_id_auto_generated_from_idx(self):
        # idx is 0-based; generated ids are 1-based (idx=4 → US-005).
        s = _normalize_story({"title": "x"}, idx=4)
        assert s["id"] == "US-005"

    def test_id_preserved_when_provided(self):
        s = _normalize_story({"id": "US-042", "title": "x"}, idx=0)
        assert s["id"] == "US-042"

    def test_depends_on_preserves_strings_filters_garbage(self):
        # Empty strings and None entries are dropped; order of valid ids kept.
        s = _normalize_story({"title": "x", "dependsOn": ["US-001", "", None, "US-002"]}, idx=0)
        assert s["dependsOn"] == ["US-001", "US-002"]
|
||||
209
tests/test_dashboard_ralph_endpoint.py
Normal file
209
tests/test_dashboard_ralph_endpoint.py
Normal file
@@ -0,0 +1,209 @@
|
||||
"""Tests for /api/ralph/* endpoints (dashboard live).
|
||||
|
||||
Acoperă:
|
||||
- /api/ralph/status: list cards cu state + count + fetchedAt
|
||||
- /api/ralph/<slug>/log: tail progress.txt
|
||||
- /api/ralph/<slug>/prd: full prd.json
|
||||
- _ralph_validate_slug: path traversal protection
|
||||
- corrupt prd.json: graceful error (status='error', not 500)
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
PROJECT_ROOT = Path(__file__).resolve().parents[1]
|
||||
DASH = PROJECT_ROOT / "dashboard"
|
||||
if str(DASH) not in sys.path:
|
||||
sys.path.insert(0, str(DASH))
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
def ralph_module():
    """Import handlers.ralph lazily (module-scoped), after sys.path setup."""
    from handlers import ralph  # type: ignore
    return ralph
|
||||
|
||||
|
||||
@pytest.fixture
def handler(ralph_module, tmp_path, monkeypatch):
    """Build a stubbed handler with a temp WORKSPACE_DIR."""
    import constants  # type: ignore

    # Re-route WORKSPACE_DIR to tmp_path so tests never touch the real workspace.
    monkeypatch.setattr(constants, "WORKSPACE_DIR", tmp_path)

    class _Stub(ralph_module.RalphHandlers):
        # Captures send_json / send_error output instead of writing to a socket;
        # deliberately skips the HTTP base-class __init__ (no request machinery).
        def __init__(self):
            self.captured = None
            self.captured_code = None
            self.path = "/api/ralph/status"

        def send_json(self, data, code=200):
            self.captured = data
            self.captured_code = code

        def send_error(self, code):
            self.captured = {"error_code": code}
            self.captured_code = code

    return _Stub()
|
||||
|
||||
|
||||
def _make_ralph_project(workspace: Path, slug: str, stories: list, progress: str = "init"):
|
||||
"""Create a fake ralph project under workspace/<slug>/scripts/ralph/."""
|
||||
ralph_dir = workspace / slug / "scripts" / "ralph"
|
||||
ralph_dir.mkdir(parents=True, exist_ok=True)
|
||||
(ralph_dir / "prd.json").write_text(json.dumps({
|
||||
"projectName": slug,
|
||||
"branchName": f"ralph/{slug}",
|
||||
"userStories": stories,
|
||||
}), encoding="utf-8")
|
||||
(ralph_dir / "progress.txt").write_text(progress, encoding="utf-8")
|
||||
return ralph_dir
|
||||
|
||||
|
||||
# ── /api/ralph/status ──────────────────────────────────────────
|
||||
|
||||
|
||||
class TestStatus:
    """/api/ralph/status: project cards with state, counts and fetchedAt."""

    def test_empty_workspace_returns_empty(self, handler):
        handler.handle_ralph_status()
        assert handler.captured_code == 200
        assert handler.captured["projects"] == []
        assert "fetchedAt" in handler.captured

    def test_status_skips_non_ralph_projects(self, handler, tmp_path):
        # Create a project WITHOUT scripts/ralph
        (tmp_path / "regular-proj").mkdir()
        handler.handle_ralph_status()
        assert handler.captured["projects"] == []

    def test_status_lists_ralph_projects(self, handler, tmp_path):
        # One passed + one pending story → partial progress, idle state.
        _make_ralph_project(tmp_path, "proj-a", [
            {"id": "US-001", "title": "a", "priority": 10, "passes": True,
             "failed": False, "blocked": False, "retries": 0, "tags": []},
            {"id": "US-002", "title": "b", "priority": 20, "passes": False,
             "failed": False, "blocked": False, "retries": 0, "tags": ["ui"]},
        ])
        handler.handle_ralph_status()
        projects = handler.captured["projects"]
        assert len(projects) == 1
        p = projects[0]
        assert p["slug"] == "proj-a"
        assert p["storiesTotal"] == 2
        assert p["storiesComplete"] == 1
        assert p["storiesFailed"] == 0
        assert p["storiesBlocked"] == 0
        assert p["status"] == "idle"  # not running (no .ralph.pid)

    def test_status_corrupt_prd_returns_error_not_500(self, handler, tmp_path):
        # Malformed prd.json must degrade to a per-project 'error' card,
        # never a 500 for the whole endpoint.
        ralph_dir = tmp_path / "broken" / "scripts" / "ralph"
        ralph_dir.mkdir(parents=True)
        (ralph_dir / "prd.json").write_text("{not valid json", encoding="utf-8")
        handler.handle_ralph_status()
        assert handler.captured_code == 200
        assert any(p.get("status") == "error" for p in handler.captured["projects"])

    def test_status_count_matches_projects(self, handler, tmp_path):
        _make_ralph_project(tmp_path, "p1", [])
        _make_ralph_project(tmp_path, "p2", [])
        handler.handle_ralph_status()
        assert handler.captured["count"] == 2

    def test_complete_status_when_all_pass(self, handler, tmp_path):
        _make_ralph_project(tmp_path, "donezo", [
            {"id": "US-001", "passes": True, "failed": False, "blocked": False,
             "retries": 0, "tags": [], "title": "x", "priority": 10},
        ])
        handler.handle_ralph_status()
        p = handler.captured["projects"][0]
        assert p["status"] == "complete"

    def test_failed_status_propagation(self, handler, tmp_path):
        # A story failed at max retries surfaces as a 'failed' project card.
        _make_ralph_project(tmp_path, "broken-proj", [
            {"id": "US-001", "passes": False, "failed": True, "blocked": False,
             "retries": 3, "tags": [], "title": "x", "priority": 10,
             "failureReason": "max_retries"},
        ])
        handler.handle_ralph_status()
        p = handler.captured["projects"][0]
        assert p["status"] == "failed"
        assert p["storiesFailed"] == 1
||||
|
||||
|
||||
# ── /api/ralph/<slug>/log ──────────────────────────────────────
|
||||
|
||||
|
||||
class TestLog:
    """/api/ralph/<slug>/log: tails progress.txt with slug validation."""

    def test_log_returns_progress_lines(self, handler, tmp_path):
        _make_ralph_project(tmp_path, "p1", [], progress="line1\nline2\nline3")
        handler.path = "/api/ralph/p1/log"
        handler.handle_ralph_log("p1")
        assert handler.captured_code == 200
        assert handler.captured["lines"] == ["line1", "line2", "line3"]
        assert handler.captured["total"] == 3

    def test_log_invalid_slug_400(self, handler):
        # Path-traversal style slug must be rejected before any file access.
        handler.handle_ralph_log("../etc/passwd")
        assert handler.captured_code == 400

    def test_log_path_traversal_blocked(self, handler):
        handler.handle_ralph_log("..")
        assert handler.captured_code == 400

    def test_log_missing_progress_returns_empty(self, handler, tmp_path):
        # Project exists but has no progress.txt → empty lines, not 404.
        ralph_dir = tmp_path / "noprogress" / "scripts" / "ralph"
        ralph_dir.mkdir(parents=True)
        (ralph_dir / "prd.json").write_text("{}")  # no progress.txt
        handler.path = "/api/ralph/noprogress/log"
        handler.handle_ralph_log("noprogress")
        assert handler.captured_code == 200
        assert handler.captured["lines"] == []
|
||||
|
||||
|
||||
# ── /api/ralph/<slug>/prd ──────────────────────────────────────
|
||||
|
||||
|
||||
class TestPrd:
    """/api/ralph/<slug>/prd: serves the full prd.json with slug validation."""

    def test_prd_returns_full_json(self, handler, tmp_path):
        stories = [{"id": "US-001", "passes": False, "title": "t", "priority": 10}]
        _make_ralph_project(tmp_path, "p1", stories)
        handler.handle_ralph_prd("p1")
        assert handler.captured_code == 200
        assert handler.captured["projectName"] == "p1"
        assert len(handler.captured["userStories"]) == 1

    def test_prd_404_when_missing(self, handler, tmp_path):
        (tmp_path / "ghost").mkdir()  # exists, but no prd.json
        handler.handle_ralph_prd("ghost")
        assert handler.captured_code == 404

    def test_prd_invalid_slug_400(self, handler):
        # Absolute path as slug must be rejected up front.
        handler.handle_ralph_prd("/etc/passwd")
        assert handler.captured_code == 400
|
||||
|
||||
|
||||
# ── _ralph_validate_slug ───────────────────────────────────────
|
||||
|
||||
|
||||
class TestValidateSlug:
    """_ralph_validate_slug: resolves safe slugs, returns None for anything else."""

    def test_valid_slug_returns_path(self, handler, tmp_path):
        (tmp_path / "good-slug").mkdir()
        result = handler._ralph_validate_slug("good-slug")
        assert result is not None
        assert result.name == "good-slug"

    def test_slash_rejected(self, handler):
        assert handler._ralph_validate_slug("a/b") is None

    def test_dotdot_rejected(self, handler):
        assert handler._ralph_validate_slug("..") is None

    def test_empty_rejected(self, handler):
        assert handler._ralph_validate_slug("") is None

    def test_nonexistent_returns_none(self, handler):
        # Well-formed slug but no matching workspace dir → None, not an error.
        assert handler._ralph_validate_slug("does-not-exist") is None
|
||||
243
tests/test_planning_orchestrator.py
Normal file
243
tests/test_planning_orchestrator.py
Normal file
@@ -0,0 +1,243 @@
|
||||
"""Tests for src/planning_orchestrator.py — phase pipeline coordinator."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from src import planning_orchestrator, planning_session
|
||||
from src.planning_orchestrator import (
|
||||
BASE_PHASES,
|
||||
DESIGN_PHASE,
|
||||
PlanningOrchestrator,
|
||||
_phases_for,
|
||||
has_ui_scope,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
def tmp_planning_state(tmp_path, monkeypatch):
    """Point planning_session's state file at an isolated tmp directory."""
    sessions_dir = tmp_path / "sessions"
    sessions_dir.mkdir()
    state_file = sessions_dir / "planning.json"
    monkeypatch.setattr(planning_session, "SESSIONS_DIR", sessions_dir)
    monkeypatch.setattr(planning_session, "PLANNING_STATE_FILE", state_file)
    yield state_file
|
||||
|
||||
|
||||
@pytest.fixture
def fake_workspace(tmp_path, monkeypatch):
    """Provide a temp WORKSPACE_ROOT holding a single 'demo' project dir."""
    root = tmp_path / "workspace"
    root.mkdir()
    (root / "demo").mkdir()
    # Both modules read WORKSPACE_ROOT independently — patch each.
    monkeypatch.setattr(planning_session, "WORKSPACE_ROOT", root)
    monkeypatch.setattr(planning_orchestrator, "WORKSPACE_ROOT", root)
    yield root
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# has_ui_scope / _phases_for
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestUiScopeHeuristic:
    """has_ui_scope keyword heuristic and its effect on the phase plan."""

    @pytest.mark.parametrize("text,expected", [
        ("redesign UI for the dashboard", True),
        ("add a button on settings page", True),
        ("frontend cleanup", True),
        ("Adaugă filtru genuri pe pagina de game-library", True),  # ro
        ("schimbă culoarea butonului de submit", True),  # ro
        ("refactor utility helpers", False),
        ("rewrite the database migration scripts", False),
        ("tweak the rate limiter", False),
    ])
    def test_detects_ui_keywords(self, text, expected):
        assert has_ui_scope(text) is expected

    def test_phases_for_excludes_design_when_no_ui(self):
        phases = _phases_for("refactor utility")
        assert phases == BASE_PHASES
        assert DESIGN_PHASE not in phases

    def test_phases_for_appends_design_for_ui(self):
        # Design phase is appended after the base phases, never interleaved.
        phases = _phases_for("add login page")
        assert phases[-1] == DESIGN_PHASE
        assert phases[: len(BASE_PHASES)] == BASE_PHASES
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Orchestrator start / respond / advance / cancel — mock subprocess
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _fake_result(session_id="sess-1", text="hi"):
|
||||
return {
|
||||
"result": text,
|
||||
"session_id": session_id,
|
||||
"usage": {"input_tokens": 1000, "output_tokens": 200},
|
||||
"total_cost_usd": 0.4,
|
||||
"subtype": "success",
|
||||
"is_error": False,
|
||||
}
|
||||
|
||||
|
||||
class TestOrchestratorStart:
    """PlanningOrchestrator.start: seeds state with the planned phase list."""

    def test_start_persists_phases_planned(
        self, tmp_planning_state, fake_workspace
    ):
        # _run_claude is mocked — no real subprocess is spawned.
        with patch(
            "src.planning_session._run_claude",
            return_value=_fake_result(session_id="s-1"),
        ):
            sess, first = PlanningOrchestrator.start(
                "demo", "Add login button", "ch-1", adapter="discord"
            )
        assert sess.session_id == "s-1"
        from src.planning_session import get_planning_state
        state = get_planning_state("discord", "ch-1")
        assert state["phase"] == BASE_PHASES[0]  # /office-hours
        # UI scope → design phase included
        assert state["phases_planned"][-1] == DESIGN_PHASE
        assert state["phase_index"] == 0
        assert state["phases_completed"] == []

    def test_start_no_ui_scope_no_design_phase(
        self, tmp_planning_state, fake_workspace
    ):
        # Non-UI description must not schedule the design phase.
        with patch(
            "src.planning_session._run_claude",
            return_value=_fake_result(session_id="s-1"),
        ):
            PlanningOrchestrator.start(
                "demo", "refactor utility helpers", "ch-1", adapter="discord"
            )
        from src.planning_session import get_planning_state
        state = get_planning_state("discord", "ch-1")
        assert DESIGN_PHASE not in state["phases_planned"]
|
||||
|
||||
|
||||
class TestOrchestratorRespond:
    """PlanningOrchestrator.respond: relays text and detects the ready marker."""

    def test_respond_returns_text_and_phase_ready_marker(
        self, tmp_planning_state, fake_workspace
    ):
        with patch(
            "src.planning_session._run_claude",
            return_value=_fake_result(session_id="s-1", text="initial"),
        ):
            PlanningOrchestrator.start("demo", "Add login button", "ch-1", "discord")

        # The marker string in the model output flips `ready` to True.
        ready_text = "ok we are done. PHASE_STATUS: ready_to_advance"
        with patch(
            "src.planning_session._run_claude",
            return_value=_fake_result(session_id="s-1", text=ready_text),
        ):
            sess, response, ready = PlanningOrchestrator.respond(
                "discord", "ch-1", "user reply"
            )
        assert response == ready_text
        assert ready is True
        assert sess is not None

    def test_respond_returns_none_when_no_state(self, tmp_planning_state):
        # No planning state for the channel → no session, Romanian error text.
        sess, text, ready = PlanningOrchestrator.respond(
            "discord", "ch-missing", "hi"
        )
        assert sess is None
        assert "Nu există" in text
        assert ready is False
|
||||
|
||||
|
||||
class TestOrchestratorAdvance:
    """PlanningOrchestrator.advance: next phase in a fresh subprocess, or finalize."""

    def test_advance_starts_next_phase_fresh_subprocess(
        self, tmp_planning_state, fake_workspace
    ):
        # Phase 1 → office-hours
        with patch(
            "src.planning_session._run_claude",
            return_value=_fake_result(session_id="s-1"),
        ):
            PlanningOrchestrator.start(
                "demo", "Add login button", "ch-1", "discord"
            )

        # Advance → /plan-ceo-review fresh subprocess
        with patch(
            "src.planning_session._run_claude",
            return_value=_fake_result(session_id="s-2", text="ceo phase started"),
        ) as mock_run:
            sess, text, completed = PlanningOrchestrator.advance(
                "discord", "ch-1"
            )
        mock_run.assert_called_once()
        # Verify the new subprocess has /plan-ceo-review in prompt (NOT --resume)
        cmd = mock_run.call_args[0][0]
        assert "/plan-ceo-review" in cmd[2]
        assert "--resume" not in cmd
        assert sess.session_id == "s-2"
        assert sess.phase == "/plan-ceo-review"
        assert completed is False

        from src.planning_session import get_planning_state
        state = get_planning_state("discord", "ch-1")
        assert "/office-hours" in state["phases_completed"]
        assert state["phase_index"] == 1

    def test_advance_writes_final_plan_when_pipeline_complete(
        self, tmp_planning_state, fake_workspace
    ):
        # Manually seed state at the last phase.
        from src.planning_session import _save_planning_state, _channel_key
        # Build phase plan with 2 phases for brevity (skip design for non-UI).
        state = {
            _channel_key("discord", "ch-1"): {
                "slug": "demo",
                "description": "refactor utility",
                "phase": "/plan-eng-review",
                "phases_planned": ["/office-hours", "/plan-ceo-review", "/plan-eng-review"],
                "phase_index": 2,
                "phases_completed": ["/office-hours", "/plan-ceo-review"],
                "session_id": "s-eng",
                "planning_session_id": "ps-uuid",
                "adapter": "discord",
                "channel_id": "ch-1",
                "started_at": "2026-04-26T20:00:00+00:00",
                "updated_at": "2026-04-26T20:30:00+00:00",
                "last_text_excerpt": "eng review done",
                "last_subtype": "success",
            }
        }
        _save_planning_state(state)

        # Advance with no more phases — should write final-plan stub, no claude call.
        with patch("src.planning_session._run_claude") as mock_run:
            sess, text, completed = PlanningOrchestrator.advance(
                "discord", "ch-1"
            )
        mock_run.assert_not_called()
        assert completed is True
        plan_path = PlanningOrchestrator.final_plan_path("demo")
        assert plan_path.exists()
        content = plan_path.read_text(encoding="utf-8")
        assert "demo" in content
        assert "refactor utility" in content
        # All phases listed
        assert "/office-hours" in content
        assert "/plan-ceo-review" in content
        assert "/plan-eng-review" in content
||||
|
||||
|
||||
class TestOrchestratorCancel:
    """PlanningOrchestrator.cancel: clears state; False when nothing to cancel."""

    def test_cancel_clears_state(self, tmp_planning_state, fake_workspace):
        with patch(
            "src.planning_session._run_claude",
            return_value=_fake_result(session_id="s-1"),
        ):
            PlanningOrchestrator.start("demo", "x", "ch-1", "discord")
        from src.planning_session import is_in_planning
        assert is_in_planning("discord", "ch-1") is True
        assert PlanningOrchestrator.cancel("discord", "ch-1") is True
        assert is_in_planning("discord", "ch-1") is False

    def test_cancel_returns_false_when_no_state(self, tmp_planning_state):
        assert PlanningOrchestrator.cancel("discord", "ch-x") is False
|
||||
278
tests/test_planning_session.py
Normal file
278
tests/test_planning_session.py
Normal file
@@ -0,0 +1,278 @@
|
||||
"""Tests for src/planning_session.py — PlanningSession + state persistence."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from src import planning_session
|
||||
from src.planning_session import (
|
||||
PHASE_NEEDS_INPUT_MARKER,
|
||||
PHASE_READY_MARKER,
|
||||
PlanningSession,
|
||||
_channel_key,
|
||||
build_planning_system_prompt,
|
||||
clear_planning_state,
|
||||
get_planning_state,
|
||||
is_in_planning,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Fixtures
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def tmp_planning_state(tmp_path, monkeypatch):
|
||||
"""Redirect planning state file into a tmp dir for each test."""
|
||||
fake_sessions_dir = tmp_path / "sessions"
|
||||
fake_sessions_dir.mkdir()
|
||||
fake_state = fake_sessions_dir / "planning.json"
|
||||
monkeypatch.setattr(planning_session, "SESSIONS_DIR", fake_sessions_dir)
|
||||
monkeypatch.setattr(planning_session, "PLANNING_STATE_FILE", fake_state)
|
||||
yield fake_state
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def fake_workspace(tmp_path, monkeypatch):
|
||||
"""Pretend ~/workspace/<slug>/ exists so PlanningSession.cwd resolves."""
|
||||
workspace = tmp_path / "workspace"
|
||||
workspace.mkdir()
|
||||
(workspace / "demo").mkdir()
|
||||
monkeypatch.setattr(planning_session, "WORKSPACE_ROOT", workspace)
|
||||
yield workspace
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# build_planning_system_prompt
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestBuildPlanningSystemPrompt:
|
||||
def test_substitutes_slug_phase_description(self):
|
||||
prompt = build_planning_system_prompt(
|
||||
slug="demo", description="Add filter X", phase="/office-hours"
|
||||
)
|
||||
# Even if the prompt template differs, the values must appear at least
|
||||
# once each.
|
||||
assert "demo" in prompt
|
||||
assert "Add filter X" in prompt
|
||||
assert "/office-hours" in prompt
|
||||
|
||||
def test_returns_empty_when_template_missing(self, tmp_path, monkeypatch):
|
||||
monkeypatch.setattr(
|
||||
planning_session, "PLANNING_PROMPT_FILE", tmp_path / "missing.md"
|
||||
)
|
||||
assert build_planning_system_prompt("a", "b", "/x") == ""
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# state get/set/clear
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestPlanningState:
|
||||
def test_clear_returns_false_when_absent(self, tmp_planning_state):
|
||||
assert clear_planning_state("discord", "ch-1") is False
|
||||
|
||||
def test_get_returns_none_when_absent(self, tmp_planning_state):
|
||||
assert get_planning_state("discord", "ch-1") is None
|
||||
assert is_in_planning("discord", "ch-1") is False
|
||||
|
||||
def test_persist_and_recover(self, tmp_planning_state, fake_workspace):
|
||||
# Build a session WITHOUT actually invoking claude — call _persist directly.
|
||||
sess = PlanningSession(
|
||||
slug="demo",
|
||||
description="desc",
|
||||
phase="/office-hours",
|
||||
channel_id="ch-1",
|
||||
adapter="discord",
|
||||
session_id="sess-uuid-1",
|
||||
)
|
||||
sess._last_response = "hello world " + PHASE_NEEDS_INPUT_MARKER
|
||||
sess._last_subtype = "success"
|
||||
sess._persist(action="start", cost_usd=0.42)
|
||||
|
||||
assert is_in_planning("discord", "ch-1") is True
|
||||
state = get_planning_state("discord", "ch-1")
|
||||
assert state is not None
|
||||
assert state["slug"] == "demo"
|
||||
assert state["session_id"] == "sess-uuid-1"
|
||||
assert state["phase"] == "/office-hours"
|
||||
assert state["last_subtype"] == "success"
|
||||
assert "hello world" in state["last_text_excerpt"]
|
||||
|
||||
recovered = PlanningSession.from_state("discord", "ch-1")
|
||||
assert recovered is not None
|
||||
assert recovered.slug == "demo"
|
||||
assert recovered.session_id == "sess-uuid-1"
|
||||
assert recovered.phase == "/office-hours"
|
||||
|
||||
assert clear_planning_state("discord", "ch-1") is True
|
||||
assert get_planning_state("discord", "ch-1") is None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# is_phase_ready
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestIsPhaseReady:
|
||||
def test_returns_true_when_marker_present(self, fake_workspace):
|
||||
sess = PlanningSession("demo", "d", "/x", "ch", session_id="abc")
|
||||
sess._last_response = f"some text {PHASE_READY_MARKER}"
|
||||
assert sess.is_phase_ready() is True
|
||||
|
||||
def test_returns_false_when_marker_absent(self, fake_workspace):
|
||||
sess = PlanningSession("demo", "d", "/x", "ch", session_id="abc")
|
||||
sess._last_response = "some text without marker"
|
||||
assert sess.is_phase_ready() is False
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# cwd resolution
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestCwd:
|
||||
def test_workspace_dir_used_when_present(self, fake_workspace):
|
||||
sess = PlanningSession("demo", "d", "/x", "ch")
|
||||
assert sess.cwd == fake_workspace / "demo"
|
||||
|
||||
def test_falls_back_to_project_root_when_missing(
|
||||
self, fake_workspace, monkeypatch
|
||||
):
|
||||
sess = PlanningSession("nonexistent-slug", "d", "/x", "ch")
|
||||
# Falls back to PROJECT_ROOT
|
||||
assert sess.cwd == planning_session.PROJECT_ROOT
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# command construction
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestBuildCmd:
|
||||
def test_start_includes_skill_phase_and_max_turns(self, fake_workspace):
|
||||
sess = PlanningSession("demo", "Add filter", "/office-hours", "ch")
|
||||
cmd = sess._build_cmd(
|
||||
"/office-hours Add filter",
|
||||
resume=None,
|
||||
max_turns=20,
|
||||
with_system_prompt=False,
|
||||
)
|
||||
assert cmd[0:3] == [planning_session.CLAUDE_BIN, "-p", "/office-hours Add filter"]
|
||||
assert "--max-turns" in cmd
|
||||
assert "20" in cmd
|
||||
assert "--output-format" in cmd
|
||||
assert "stream-json" in cmd
|
||||
assert "--dangerously-skip-permissions" in cmd
|
||||
# No --resume on a fresh start
|
||||
assert "--resume" not in cmd
|
||||
|
||||
def test_resume_includes_resume_flag(self, fake_workspace):
|
||||
sess = PlanningSession(
|
||||
"demo", "Add filter", "/office-hours", "ch", session_id="abc"
|
||||
)
|
||||
cmd = sess._build_cmd(
|
||||
"user reply",
|
||||
resume="abc",
|
||||
max_turns=20,
|
||||
with_system_prompt=False,
|
||||
)
|
||||
assert "--resume" in cmd
|
||||
assert "abc" in cmd
|
||||
|
||||
def test_with_system_prompt_appends_flag(self, fake_workspace, tmp_path, monkeypatch):
|
||||
# Create a tiny prompt file so build_planning_system_prompt returns text.
|
||||
fake = tmp_path / "planning_agent.md"
|
||||
fake.write_text("phase={phase} slug={slug}", encoding="utf-8")
|
||||
monkeypatch.setattr(planning_session, "PLANNING_PROMPT_FILE", fake)
|
||||
sess = PlanningSession("demo", "d", "/office-hours", "ch")
|
||||
cmd = sess._build_cmd(
|
||||
"prompt", resume=None, max_turns=20, with_system_prompt=True
|
||||
)
|
||||
assert "--system-prompt" in cmd
|
||||
idx = cmd.index("--system-prompt")
|
||||
assert "phase=/office-hours" in cmd[idx + 1]
|
||||
assert "slug=demo" in cmd[idx + 1]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# start() — integration-flavoured, mocks _run_claude
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestStart:
|
||||
def test_persists_session_id_and_response(
|
||||
self, tmp_planning_state, fake_workspace
|
||||
):
|
||||
fake_result = {
|
||||
"result": "Bună! Câteva întrebări… " + PHASE_NEEDS_INPUT_MARKER,
|
||||
"session_id": "claude-uuid-99",
|
||||
"usage": {"input_tokens": 1000, "output_tokens": 200},
|
||||
"total_cost_usd": 0.55,
|
||||
"subtype": "success",
|
||||
"is_error": False,
|
||||
}
|
||||
with patch("src.planning_session._run_claude", return_value=fake_result) as mock_run:
|
||||
sess = PlanningSession.start(
|
||||
slug="demo",
|
||||
description="Add filter X",
|
||||
phase="/office-hours",
|
||||
channel_id="ch-1",
|
||||
adapter="discord",
|
||||
)
|
||||
mock_run.assert_called_once()
|
||||
# cwd kw passed
|
||||
_, kwargs = mock_run.call_args
|
||||
assert "cwd" in kwargs
|
||||
assert sess.session_id == "claude-uuid-99"
|
||||
assert "Bună" in sess.last_response
|
||||
state = get_planning_state("discord", "ch-1")
|
||||
assert state["session_id"] == "claude-uuid-99"
|
||||
assert state["slug"] == "demo"
|
||||
assert state["phase"] == "/office-hours"
|
||||
|
||||
def test_retries_on_error_max_turns(self, tmp_planning_state, fake_workspace):
|
||||
# First call returns error_max_turns with no session_id; second returns success.
|
||||
first = {
|
||||
"result": "deep tool use",
|
||||
"session_id": "",
|
||||
"usage": {},
|
||||
"total_cost_usd": 0.6,
|
||||
"subtype": "error_max_turns",
|
||||
"is_error": True,
|
||||
}
|
||||
second = {
|
||||
"result": "now I have a question",
|
||||
"session_id": "claude-uuid-2",
|
||||
"usage": {},
|
||||
"total_cost_usd": 0.7,
|
||||
"subtype": "success",
|
||||
"is_error": False,
|
||||
}
|
||||
with patch(
|
||||
"src.planning_session._run_claude", side_effect=[first, second]
|
||||
) as mock_run:
|
||||
sess = PlanningSession.start(
|
||||
slug="demo",
|
||||
description="Add filter X",
|
||||
phase="/office-hours",
|
||||
channel_id="ch-1",
|
||||
adapter="discord",
|
||||
)
|
||||
assert mock_run.call_count == 2
|
||||
assert sess.session_id == "claude-uuid-2"
|
||||
# Second call uses RETRY_MAX_TURNS
|
||||
second_args = mock_run.call_args_list[1][0][0]
|
||||
assert "30" in second_args # RETRY_MAX_TURNS
|
||||
|
||||
def test_respond_requires_session_id(self, fake_workspace):
|
||||
sess = PlanningSession("demo", "d", "/x", "ch") # no session_id
|
||||
with pytest.raises(RuntimeError):
|
||||
sess.respond("hello")
|
||||
108
tests/test_ralph_flow.py
Normal file
108
tests/test_ralph_flow.py
Normal file
@@ -0,0 +1,108 @@
|
||||
"""Tests for src/ralph_flow.py — short-lived per-user UX state."""
|
||||
|
||||
from datetime import datetime, timedelta, timezone
|
||||
|
||||
import pytest
|
||||
|
||||
from src import ralph_flow
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def _isolate_state_file(tmp_path, monkeypatch):
|
||||
"""Redirect state file to a tmp location for each test."""
|
||||
monkeypatch.setattr(ralph_flow, "_STATE_FILE", tmp_path / "ralph_flow.json")
|
||||
monkeypatch.setattr(ralph_flow, "SESSIONS_DIR", tmp_path)
|
||||
|
||||
|
||||
def test_get_state_returns_none_when_absent():
|
||||
assert ralph_flow.get_state("discord", "c1", "u1") is None
|
||||
|
||||
|
||||
def test_set_then_get_round_trip():
|
||||
ralph_flow.set_state(
|
||||
"discord", "c1", "u1",
|
||||
step=ralph_flow.STEP_INPUT_DESCRIPTION,
|
||||
project="roa2web",
|
||||
)
|
||||
state = ralph_flow.get_state("discord", "c1", "u1")
|
||||
assert state is not None
|
||||
assert state["step"] == ralph_flow.STEP_INPUT_DESCRIPTION
|
||||
assert state["project"] == "roa2web"
|
||||
assert "expires_at" in state
|
||||
|
||||
|
||||
def test_clear_state_removes_entry():
|
||||
ralph_flow.set_state("telegram", "42", "7", step="input_description")
|
||||
assert ralph_flow.clear_state("telegram", "42", "7") is True
|
||||
assert ralph_flow.get_state("telegram", "42", "7") is None
|
||||
# Second clear is a no-op
|
||||
assert ralph_flow.clear_state("telegram", "42", "7") is False
|
||||
|
||||
|
||||
def test_state_keyed_by_adapter_chat_user():
|
||||
"""Different adapters / chats / users have isolated state."""
|
||||
ralph_flow.set_state("discord", "c1", "u1", step="input_description", project="A")
|
||||
ralph_flow.set_state("telegram", "c1", "u1", step="input_description", project="B")
|
||||
ralph_flow.set_state("discord", "c2", "u1", step="input_description", project="C")
|
||||
ralph_flow.set_state("discord", "c1", "u2", step="input_description", project="D")
|
||||
|
||||
assert ralph_flow.get_state("discord", "c1", "u1")["project"] == "A"
|
||||
assert ralph_flow.get_state("telegram", "c1", "u1")["project"] == "B"
|
||||
assert ralph_flow.get_state("discord", "c2", "u1")["project"] == "C"
|
||||
assert ralph_flow.get_state("discord", "c1", "u2")["project"] == "D"
|
||||
|
||||
|
||||
def test_expired_state_returns_none_and_self_cleans(monkeypatch):
|
||||
"""get_state on an expired entry should return None and drop the entry."""
|
||||
# Set with 0s TTL — already expired
|
||||
ralph_flow.set_state(
|
||||
"discord", "c1", "u1",
|
||||
step="input_description",
|
||||
ttl_seconds=0,
|
||||
)
|
||||
assert ralph_flow.get_state("discord", "c1", "u1") is None
|
||||
# Verify entry was dropped from disk
|
||||
assert ralph_flow._load() == {}
|
||||
|
||||
|
||||
def test_cleanup_expired_drops_only_expired():
|
||||
ralph_flow.set_state("discord", "c1", "u1", step="x", ttl_seconds=0) # expired
|
||||
ralph_flow.set_state("discord", "c2", "u2", step="y", ttl_seconds=600) # fresh
|
||||
|
||||
dropped = ralph_flow.cleanup_expired()
|
||||
|
||||
assert dropped == 1
|
||||
assert ralph_flow.get_state("discord", "c1", "u1") is None
|
||||
assert ralph_flow.get_state("discord", "c2", "u2") is not None
|
||||
|
||||
|
||||
def test_set_state_overwrites_previous():
|
||||
ralph_flow.set_state("discord", "c1", "u1", step="step_a", project="P1")
|
||||
ralph_flow.set_state("discord", "c1", "u1", step="step_b", project="P2")
|
||||
state = ralph_flow.get_state("discord", "c1", "u1")
|
||||
assert state["step"] == "step_b"
|
||||
assert state["project"] == "P2"
|
||||
|
||||
|
||||
def test_set_state_extras_propagate():
|
||||
ralph_flow.set_state(
|
||||
"discord", "c1", "u1",
|
||||
step="x",
|
||||
custom_field="hello",
|
||||
nested={"a": 1},
|
||||
)
|
||||
state = ralph_flow.get_state("discord", "c1", "u1")
|
||||
assert state["custom_field"] == "hello"
|
||||
assert state["nested"] == {"a": 1}
|
||||
|
||||
|
||||
def test_corrupted_state_file_returns_empty(tmp_path):
|
||||
"""If state file is corrupt JSON, _load returns {} so get_state stays robust."""
|
||||
ralph_flow._STATE_FILE.write_text("not json {")
|
||||
assert ralph_flow.get_state("discord", "c1", "u1") is None
|
||||
|
||||
|
||||
def test_atomic_write_does_not_leave_temp_files(tmp_path):
|
||||
ralph_flow.set_state("discord", "c1", "u1", step="x")
|
||||
leftovers = [p for p in tmp_path.iterdir() if p.name.startswith(".ralph_flow_")]
|
||||
assert leftovers == []
|
||||
@@ -134,6 +134,31 @@ class TestStatusCommand:
|
||||
assert is_cmd is True
|
||||
|
||||
|
||||
# --- Ralph command dispatch ---
|
||||
|
||||
|
||||
class TestRalphDispatch:
|
||||
def test_p_without_args_returns_usage(self):
|
||||
response, is_cmd = route_message("ch-1", "user-1", "/p")
|
||||
assert "Folosire: /p" in response
|
||||
assert is_cmd is True
|
||||
|
||||
def test_whatsapp_appends_redirect_hint_on_usage(self):
|
||||
"""WhatsApp users see a redirect line pointing them to Discord/TG."""
|
||||
response, is_cmd = route_message(
|
||||
"ch-1", "user-1", "/p", adapter_name="whatsapp"
|
||||
)
|
||||
assert "Folosire: /p" in response
|
||||
assert "Discord sau Telegram" in response
|
||||
|
||||
def test_discord_does_not_get_whatsapp_redirect(self):
|
||||
response, is_cmd = route_message(
|
||||
"ch-1", "user-1", "/p", adapter_name="discord"
|
||||
)
|
||||
assert "Folosire: /p" in response
|
||||
assert "Discord sau Telegram" not in response
|
||||
|
||||
|
||||
# --- Unknown command ---
|
||||
|
||||
|
||||
|
||||
273
tests/test_router_planning.py
Normal file
273
tests/test_router_planning.py
Normal file
@@ -0,0 +1,273 @@
|
||||
"""Tests for src/router.py planning integration (W2 — state-aware routing,
|
||||
start_planning_session, planning_approve, planning_cancel)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from src import planning_session, planning_orchestrator, router
|
||||
from src.planning_session import _channel_key, _save_planning_state
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def tmp_state(tmp_path, monkeypatch):
|
||||
"""Redirect planning + approved-tasks into tmp."""
|
||||
sessions_dir = tmp_path / "sessions"
|
||||
sessions_dir.mkdir()
|
||||
monkeypatch.setattr(planning_session, "SESSIONS_DIR", sessions_dir)
|
||||
monkeypatch.setattr(
|
||||
planning_session, "PLANNING_STATE_FILE", sessions_dir / "planning.json"
|
||||
)
|
||||
|
||||
# approved-tasks.json — point router at a tmp file
|
||||
approved = tmp_path / "approved-tasks.json"
|
||||
approved.write_text(json.dumps({"projects": [], "last_updated": None}))
|
||||
monkeypatch.setattr(router, "APPROVED_TASKS_FILE", approved)
|
||||
|
||||
# workspace dir for planning orchestrator final-plan.md target
|
||||
workspace = tmp_path / "workspace"
|
||||
workspace.mkdir()
|
||||
(workspace / "demo").mkdir()
|
||||
monkeypatch.setattr(planning_session, "WORKSPACE_ROOT", workspace)
|
||||
monkeypatch.setattr(planning_orchestrator, "WORKSPACE_ROOT", workspace)
|
||||
yield {"sessions": sessions_dir, "approved": approved, "workspace": workspace}
|
||||
|
||||
|
||||
def _fake_result(session_id="s-1", text="hi"):
|
||||
return {
|
||||
"result": text,
|
||||
"session_id": session_id,
|
||||
"usage": {"input_tokens": 10, "output_tokens": 5},
|
||||
"total_cost_usd": 0.1,
|
||||
"subtype": "success",
|
||||
"is_error": False,
|
||||
}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# start_planning_session
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestStartPlanningSession:
|
||||
def test_creates_entry_and_sets_status_planning(self, tmp_state):
|
||||
with patch(
|
||||
"src.planning_session._run_claude",
|
||||
return_value=_fake_result(session_id="s-1", text="first"),
|
||||
):
|
||||
response = router.start_planning_session(
|
||||
"demo", "Add filter X", "ch-1", "discord"
|
||||
)
|
||||
assert response == "first"
|
||||
approved = json.loads(tmp_state["approved"].read_text())
|
||||
assert len(approved["projects"]) == 1
|
||||
entry = approved["projects"][0]
|
||||
assert entry["name"] == "demo"
|
||||
assert entry["status"] == "planning"
|
||||
assert entry["planning_session_id"] # uuid
|
||||
|
||||
def test_promotes_existing_pending_entry(self, tmp_state):
|
||||
# Pre-seed an existing pending entry
|
||||
approved_data = {
|
||||
"projects": [
|
||||
{
|
||||
"name": "demo",
|
||||
"description": "from earlier",
|
||||
"status": "pending",
|
||||
"planning_session_id": None,
|
||||
"final_plan_path": None,
|
||||
"proposed_at": "2026-04-26T18:00:00+00:00",
|
||||
"approved_at": None,
|
||||
"started_at": None,
|
||||
"pid": None,
|
||||
}
|
||||
],
|
||||
"last_updated": None,
|
||||
}
|
||||
tmp_state["approved"].write_text(json.dumps(approved_data))
|
||||
|
||||
with patch(
|
||||
"src.planning_session._run_claude",
|
||||
return_value=_fake_result(session_id="s-2", text="hi"),
|
||||
):
|
||||
router.start_planning_session(
|
||||
"demo", "Add filter X", "ch-1", "discord"
|
||||
)
|
||||
approved = json.loads(tmp_state["approved"].read_text())
|
||||
assert len(approved["projects"]) == 1
|
||||
assert approved["projects"][0]["status"] == "planning"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# planning_approve
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestPlanningApprove:
|
||||
def test_promotes_status_and_clears_state(self, tmp_state):
|
||||
# Seed an active planning state + approved-tasks pending entry
|
||||
_save_planning_state({
|
||||
_channel_key("discord", "ch-1"): {
|
||||
"slug": "demo",
|
||||
"description": "x",
|
||||
"phase": "__complete__",
|
||||
"phases_planned": ["/office-hours"],
|
||||
"phases_completed": ["/office-hours"],
|
||||
"phase_index": 1,
|
||||
"session_id": "s-uuid",
|
||||
"planning_session_id": "ps-uuid",
|
||||
"final_plan_path": "/tmp/final-plan.md",
|
||||
"adapter": "discord",
|
||||
"channel_id": "ch-1",
|
||||
"started_at": "2026-04-26T20:00:00+00:00",
|
||||
"updated_at": "2026-04-26T20:30:00+00:00",
|
||||
"last_text_excerpt": "done",
|
||||
"last_subtype": "success",
|
||||
}
|
||||
})
|
||||
approved_data = {
|
||||
"projects": [
|
||||
{
|
||||
"name": "demo",
|
||||
"description": "x",
|
||||
"status": "planning",
|
||||
"planning_session_id": "ps-uuid",
|
||||
"final_plan_path": None,
|
||||
"proposed_at": "2026-04-26T18:00:00+00:00",
|
||||
"approved_at": None,
|
||||
"started_at": None,
|
||||
"pid": None,
|
||||
}
|
||||
],
|
||||
"last_updated": None,
|
||||
}
|
||||
tmp_state["approved"].write_text(json.dumps(approved_data))
|
||||
|
||||
msg = router.planning_approve("ch-1", "discord")
|
||||
assert "Aprobat" in msg or "✅" in msg
|
||||
|
||||
approved = json.loads(tmp_state["approved"].read_text())
|
||||
entry = approved["projects"][0]
|
||||
assert entry["status"] == "approved"
|
||||
assert entry["approved_at"] is not None
|
||||
assert entry["planning_session_id"] is None
|
||||
assert entry["final_plan_path"] # set
|
||||
|
||||
# Planning state cleared
|
||||
from src.planning_session import is_in_planning
|
||||
assert is_in_planning("discord", "ch-1") is False
|
||||
|
||||
def test_no_state_returns_error_message(self, tmp_state):
|
||||
msg = router.planning_approve("ch-missing", "discord")
|
||||
assert "Nu există" in msg
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# planning_cancel via route_message /cancel
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestRouteMessagePlanningCancel:
|
||||
def test_slash_cancel_in_planning_clears_state(self, tmp_state):
|
||||
# Seed a planning session and approved-tasks pending entry
|
||||
_save_planning_state({
|
||||
_channel_key("discord", "ch-1"): {
|
||||
"slug": "demo",
|
||||
"description": "x",
|
||||
"phase": "/office-hours",
|
||||
"phases_planned": ["/office-hours", "/plan-ceo-review"],
|
||||
"phases_completed": [],
|
||||
"phase_index": 0,
|
||||
"session_id": "s-uuid",
|
||||
"planning_session_id": "ps-uuid",
|
||||
"adapter": "discord",
|
||||
"channel_id": "ch-1",
|
||||
"started_at": "2026-04-26T20:00:00+00:00",
|
||||
"updated_at": "2026-04-26T20:00:00+00:00",
|
||||
"last_text_excerpt": "Hi",
|
||||
"last_subtype": "success",
|
||||
}
|
||||
})
|
||||
approved_data = {
|
||||
"projects": [
|
||||
{
|
||||
"name": "demo",
|
||||
"description": "x",
|
||||
"status": "planning",
|
||||
"planning_session_id": "ps-uuid",
|
||||
"final_plan_path": None,
|
||||
"proposed_at": "2026-04-26T18:00:00+00:00",
|
||||
"approved_at": None,
|
||||
"started_at": None,
|
||||
"pid": None,
|
||||
}
|
||||
],
|
||||
"last_updated": None,
|
||||
}
|
||||
tmp_state["approved"].write_text(json.dumps(approved_data))
|
||||
|
||||
response, is_cmd = router.route_message(
|
||||
"ch-1", "user-1", "/cancel", adapter_name="discord"
|
||||
)
|
||||
assert is_cmd is True
|
||||
assert "anulat" in response.lower()
|
||||
approved = json.loads(tmp_state["approved"].read_text())
|
||||
assert approved["projects"][0]["status"] == "pending"
|
||||
from src.planning_session import is_in_planning
|
||||
assert is_in_planning("discord", "ch-1") is False
|
||||
|
||||
|
||||
class TestRouteMessagePlanningRespond:
|
||||
def test_plain_message_in_planning_routes_to_orchestrator(self, tmp_state):
|
||||
# Seed a planning session
|
||||
_save_planning_state({
|
||||
_channel_key("discord", "ch-1"): {
|
||||
"slug": "demo",
|
||||
"description": "x",
|
||||
"phase": "/office-hours",
|
||||
"phases_planned": ["/office-hours"],
|
||||
"phases_completed": [],
|
||||
"phase_index": 0,
|
||||
"session_id": "s-uuid",
|
||||
"planning_session_id": "ps-uuid",
|
||||
"adapter": "discord",
|
||||
"channel_id": "ch-1",
|
||||
"started_at": "2026-04-26T20:00:00+00:00",
|
||||
"updated_at": "2026-04-26T20:00:00+00:00",
|
||||
"last_text_excerpt": "Hi",
|
||||
"last_subtype": "success",
|
||||
}
|
||||
})
|
||||
with patch(
|
||||
"src.planning_session._run_claude",
|
||||
return_value=_fake_result(
|
||||
session_id="s-uuid", text="thanks PHASE_STATUS: needs_input"
|
||||
),
|
||||
) as mock_run:
|
||||
response, is_cmd = router.route_message(
|
||||
"ch-1", "user-1", "Vreau așa ceva.", adapter_name="discord"
|
||||
)
|
||||
mock_run.assert_called_once()
|
||||
# respond uses --resume
|
||||
cmd = mock_run.call_args[0][0]
|
||||
assert "--resume" in cmd
|
||||
assert is_cmd is False
|
||||
assert "thanks" in response
|
||||
|
||||
def test_no_planning_state_falls_through_to_normal_routing(self, tmp_state):
|
||||
# No planning state — should go to ralph dispatch / Claude.
|
||||
with patch(
|
||||
"src.router.send_message", return_value="claude says hi"
|
||||
) as mock_send:
|
||||
response, is_cmd = router.route_message(
|
||||
"ch-1", "user-1", "hello",
|
||||
adapter_name="discord",
|
||||
model="sonnet",
|
||||
)
|
||||
mock_send.assert_called_once()
|
||||
assert response == "claude says hi"
|
||||
assert is_cmd is False
|
||||
174
tests/test_smart_gates.py
Normal file
174
tests/test_smart_gates.py
Normal file
@@ -0,0 +1,174 @@
|
||||
"""Tests for W3 smart gates tag validation heuristic.
|
||||
|
||||
Acoperă:
|
||||
- infer_tags_from_paths: detect ui/db/vercel pe baza file extensions / paths
|
||||
- force_include_tags: combinare tags Opus + tags inferate din diff (anti-silent-regression)
|
||||
- Toate combinatii de tag types (ui, db, vercel, refactor, docs, backend, infra)
|
||||
- Edge cases: tags vide, tags invalide, empty diff
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
PROJECT_ROOT = Path(__file__).resolve().parents[1]
|
||||
if str(PROJECT_ROOT) not in sys.path:
|
||||
sys.path.insert(0, str(PROJECT_ROOT))
|
||||
|
||||
from tools.ralph_dag import ( # noqa: E402
|
||||
VALID_TAGS,
|
||||
force_include_tags,
|
||||
infer_tags_from_paths,
|
||||
)
|
||||
|
||||
|
||||
# ── infer_tags_from_paths ──────────────────────────────────────
|
||||
|
||||
|
||||
class TestInferTags:
|
||||
def test_empty_diff_no_tags(self):
|
||||
assert infer_tags_from_paths([]) == []
|
||||
|
||||
def test_only_readme_no_tags(self):
|
||||
assert infer_tags_from_paths(["README.md", "CHANGELOG.md"]) == []
|
||||
|
||||
def test_vue_triggers_ui(self):
|
||||
assert infer_tags_from_paths(["src/App.vue"]) == ["ui"]
|
||||
|
||||
def test_tsx_triggers_ui(self):
|
||||
assert infer_tags_from_paths(["app/page.tsx"]) == ["ui"]
|
||||
|
||||
def test_jsx_triggers_ui(self):
|
||||
assert infer_tags_from_paths(["src/Button.jsx"]) == ["ui"]
|
||||
|
||||
def test_html_triggers_ui(self):
|
||||
assert infer_tags_from_paths(["dashboard/index.html"]) == ["ui"]
|
||||
|
||||
def test_css_scss_trigger_ui(self):
|
||||
assert infer_tags_from_paths(["src/main.css"]) == ["ui"]
|
||||
assert infer_tags_from_paths(["src/main.scss"]) == ["ui"]
|
||||
|
||||
def test_svelte_triggers_ui(self):
|
||||
assert infer_tags_from_paths(["src/App.svelte"]) == ["ui"]
|
||||
|
||||
def test_migrations_triggers_db(self):
|
||||
assert infer_tags_from_paths(["db/migrations/0001_init.sql"]) == ["db"]
|
||||
|
||||
def test_top_level_migrations_triggers_db(self):
|
||||
assert infer_tags_from_paths(["migrations/2026/04/add_users.sql"]) == ["db"]
|
||||
|
||||
def test_sql_outside_migrations_still_triggers_db(self):
|
||||
assert infer_tags_from_paths(["scripts/seed.sql"]) == ["db"]
|
||||
|
||||
def test_vercel_json_only(self):
|
||||
assert infer_tags_from_paths([], has_vercel_json=True) == ["vercel"]
|
||||
|
||||
def test_combined_ui_db_vercel(self):
|
||||
result = infer_tags_from_paths(
|
||||
["app/page.tsx", "db/migrations/0001.sql"], has_vercel_json=True
|
||||
)
|
||||
assert result == ["ui", "db", "vercel"]
|
||||
|
||||
def test_dedup_when_multiple_files_same_category(self):
|
||||
result = infer_tags_from_paths(["a.tsx", "b.vue", "c.css"])
|
||||
assert result == ["ui"]
|
||||
|
||||
def test_case_insensitive_extensions(self):
|
||||
assert infer_tags_from_paths(["src/App.TSX"]) == ["ui"]
|
||||
assert infer_tags_from_paths(["db/Init.SQL"]) == ["db"]
|
||||
|
||||
|
||||
# ── force_include_tags ─────────────────────────────────────────
|
||||
|
||||
|
||||
class TestForceIncludeTags:
    """Contract tests for force_include_tags: existing ∪ diff-inferred, filtered to VALID_TAGS."""

    def test_existing_only_no_diff(self):
        assert force_include_tags(["backend"], [], False) == ["backend"]

    def test_diff_inferred_added_to_existing(self):
        # Planner tagged the story docs-only, but the diff touches .tsx → "ui" is forced in.
        merged = force_include_tags(["docs"], ["src/Page.tsx"], False)
        assert "docs" in merged
        assert "ui" in merged

    def test_filters_invalid_tags_from_existing(self):
        # "frontend" is not in VALID_TAGS — it must be dropped.
        merged = force_include_tags(["frontend", "ui"], [], False)
        assert "frontend" not in merged
        assert "ui" in merged

    def test_empty_when_no_existing_no_diff(self):
        assert force_include_tags([], [], False) == []

    def test_dedup_existing_and_inferred(self):
        # "ui" present both as existing tag and inferred from diff → appears exactly once.
        merged = force_include_tags(["ui"], ["src/A.tsx"], False)
        assert merged.count("ui") == 1

    def test_vercel_added_when_vercel_json_present(self):
        merged = force_include_tags(["backend"], [], has_vercel_json=True)
        assert "vercel" in merged
        assert "backend" in merged

    def test_all_valid_tags_preserved(self):
        # force_include must never strip valid existing tags.
        every_valid = list(VALID_TAGS)
        merged = force_include_tags(every_valid, [], False)
        for tag in every_valid:
            assert tag in merged

    def test_order_existing_first_then_inferred(self):
        # Existing tags keep their leading position (API stability).
        merged = force_include_tags(["backend"], ["src/Page.tsx", "db/migrations/0001.sql"], False)
        assert merged[0] == "backend"
        assert "ui" in merged and "db" in merged
|
||||
|
||||
|
||||
# ── Smart gates dispatcher contract (combinatii tag → expected gates) ─────────
|
||||
|
||||
|
||||
# Table-test data for the smart-gates dispatcher contract described in prompt.md.
# Only the tag → gate-name mapping (as spelled in prompt.md) is validated here,
# never the actual gate execution.
GATE_MAPPING = {
    "refactor": "/workflow:simplify",
    "ui": "/qa",
    "vercel": "gh pr checks",
    "db": "schema diff",
    "docs": None,  # docs => only the base typecheck; no dedicated gate
    "backend": "/review",
    "infra": "/review",
}
|
||||
|
||||
|
||||
class TestGateMapping:
    """Validate that prompt.md documents the expected gate for every tag."""

    @pytest.fixture(scope="class")
    def prompt_md(self):
        # Read once per class — the prompt file is static for the duration of a run.
        path = PROJECT_ROOT / "tools" / "ralph" / "prompt.md"
        return path.read_text(encoding="utf-8")

    def test_refactor_gate_documented(self, prompt_md):
        assert "/workflow:simplify" in prompt_md

    def test_ui_gate_documented(self, prompt_md):
        assert "/qa" in prompt_md
        assert "agent-browser" in prompt_md.lower()

    def test_vercel_gate_documented(self, prompt_md):
        assert "gh pr checks" in prompt_md

    def test_db_gate_documented(self, prompt_md):
        assert "schema diff" in prompt_md.lower() or "alembic" in prompt_md.lower()

    def test_backend_gate_documented(self, prompt_md):
        assert "/review" in prompt_md

    def test_run_all_fallback_documented(self, prompt_md):
        # Empty tags → run-all-gates fallback (the safe default).
        assert "tags vide" in prompt_md.lower() or "run-all-gates" in prompt_md.lower()

    def test_dag_dependson_documented(self, prompt_md):
        assert "dependsOn" in prompt_md or "DAG" in prompt_md
|
||||
@@ -2,6 +2,17 @@
|
||||
"projectName": "feature-name",
|
||||
"branchName": "ralph/feature-name",
|
||||
"description": "Descriere scurtă a feature-ului",
|
||||
"techStack": {
|
||||
"type": "python",
|
||||
"commands": {
|
||||
"start": "python main.py",
|
||||
"build": "",
|
||||
"lint": "ruff check .",
|
||||
"typecheck": "mypy .",
|
||||
"test": "pytest"
|
||||
},
|
||||
"port": 8000
|
||||
},
|
||||
"userStories": [
|
||||
{
|
||||
"id": "US-001",
|
||||
@@ -12,7 +23,15 @@
|
||||
"Criteriu specific și verificabil",
|
||||
"{techStack.commands.typecheck} passes"
|
||||
],
|
||||
"tags": [],
|
||||
"dependsOn": [],
|
||||
"requiresBrowserCheck": false,
|
||||
"requiresDesignReview": false,
|
||||
"passes": false,
|
||||
"failed": false,
|
||||
"blocked": false,
|
||||
"retries": 0,
|
||||
"failureReason": "",
|
||||
"notes": ""
|
||||
}
|
||||
]
|
||||
|
||||
@@ -1,203 +1,120 @@
|
||||
# Ralph - Instrucțiuni pentru Iterație
|
||||
# Ralph - Instrucțiuni pentru Iterație (smart gates)
|
||||
|
||||
Ești un agent autonom care implementează user stories dintr-un PRD. Aceasta este O SINGURĂ iterație - implementezi UN singur story și apoi te oprești.
|
||||
Ești un agent autonom care implementează user stories dintr-un PRD. Aceasta este O SINGURĂ iterație — implementezi UN singur story, validezi prin gate-urile relevante, apoi te oprești.
|
||||
|
||||
## Workflow pentru această iterație
|
||||
## Workflow per iterație (4 faze, gates condiționale pe `story.tags`)
|
||||
|
||||
### 1. Citește contextul
|
||||
- PRD-ul și progress.txt sunt furnizate în context
|
||||
- Înțelege ce stories sunt deja complete (`passes: true`)
|
||||
- Identifică următorul story de implementat (prioritate cea mai mică dintre cele incomplete)
|
||||
- Notează `techStack.commands` din PRD pentru comenzile corecte
|
||||
### Faza 0: Citește contextul
|
||||
|
||||
### 2. Management branch
|
||||
- Verifică dacă ești pe branch-ul corect (specificat în `branchName` din PRD)
|
||||
- Dacă nu, creează și checkout branch-ul:
|
||||
```bash
|
||||
git checkout -b <branchName>
|
||||
```
|
||||
- Dacă branch-ul există deja, doar checkout:
|
||||
```bash
|
||||
git checkout <branchName>
|
||||
```
|
||||
- PRD-ul (`prd.json`) și `progress.txt` sunt furnizate în context.
|
||||
- Identifică următorul story candidate:
|
||||
- `passes != true` ȘI `failed != true` ȘI `blocked != true`
|
||||
- DAG: toate ID-urile din `dependsOn[]` au `passes == true` (altfel sare la următorul independent)
|
||||
- Cea mai mică `priority` printre cele eligibile.
|
||||
- Notează `techStack.commands` (lint, typecheck, test, start) și `techStack.port`.
|
||||
- Notează `story.tags[]` — alegerea Faza 3 depinde de ele.
|
||||
|
||||
### 3. Selectează story-ul
|
||||
- Alege story-ul cu cea mai mică prioritate care are `passes: false`
|
||||
- Citește atent acceptance criteria
|
||||
- Verifică câmpul `requiresBrowserCheck` - dacă e `true`, trebuie verificare vizuală
|
||||
### Faza 1: IMPLEMENTARE (mereu)
|
||||
|
||||
### 4. Implementare
|
||||
- Implementează DOAR acest story
|
||||
- Urmează patterns existente în codebase
|
||||
- Fii minimal și focusat - nu adăuga funcționalități extra
|
||||
1.1. **Branch management** — verifică să fii pe `branchName` din PRD; checkout/create dacă lipsește.
|
||||
1.2. **Citește acceptance criteria** — fiecare criteriu e un test mental concret de trecut.
|
||||
1.3. **Implementează cod minimal** — DOAR ce cere story-ul. Urmează patterns existente. Fără over-engineering, fără side features.
|
||||
1.4. **Update `notes`** în `prd.json` cu fișierele atinse (pentru audit ulterior).
|
||||
|
||||
### 5. Quality Checks
|
||||
Rulează TOATE verificările înainte de commit. Folosește comenzile din `techStack.commands`:
|
||||
### Faza 2: QUALITY BASE (mereu, înainte de gates)
|
||||
|
||||
Folosește `techStack.commands`:
|
||||
|
||||
```bash
|
||||
# Folosește comenzile din prd.json techStack.commands:
|
||||
{techStack.commands.typecheck} # Type checking
|
||||
{techStack.commands.lint} # Linting
|
||||
{techStack.commands.test} # Tests (dacă există)
|
||||
{techStack.commands.typecheck} # ex: npm run typecheck / mypy .
|
||||
{techStack.commands.lint} # ex: npm run lint / ruff check .
|
||||
{techStack.commands.test} # ex: npm test / pytest
|
||||
```
|
||||
|
||||
**Comenzi standard per stack:**
|
||||
**Loop intern**: dacă vreuna eșuează → repară și repetă, max 3 retries în această fază. Dacă încă fail după 3, ieși cu sumar de erori în `progress.txt` (ralph.sh va decide retry-ul iterației).
|
||||
|
||||
| Stack | Typecheck | Lint | Test |
|
||||
|-------|-----------|------|------|
|
||||
| Next.js/TS | npm run typecheck | npm run lint | npm test |
|
||||
| Node.js | npm run typecheck | npm run lint | npm test |
|
||||
| Python | mypy . | ruff check . | python -m pytest |
|
||||
| Go | - | golangci-lint run | go test ./... |
|
||||
### Faza 3: SMART GATES (dispatcher pe `story.tags`)
|
||||
|
||||
**IMPORTANT**: Nu face commit dacă verificările eșuează. Repară mai întâi.
|
||||
Tags posibile: `ui`, `db`, `vercel`, `refactor`, `docs`, `backend`, `infra`.
|
||||
|
||||
### 6. Verificare Browser (pentru UI stories)
|
||||
Aplică DOAR gate-urile potrivite — **nu rulează toate**:
|
||||
|
||||
**DACĂ story-ul are `requiresBrowserCheck: true` sau implică UI:**
|
||||
| Tag | Gate |
|
||||
|-------------|---------------------------------------------------------------------------------------|
|
||||
| `refactor` | `/workflow:simplify` pe diff (reduce complexity fără behavior change) |
|
||||
| `ui` | `/qa` Playwright/agent-browser snapshot pe `localhost:{techStack.port}` + screenshot |
|
||||
| `ui` + `requiresDesignReview` | `/plan-design-review` pe screenshot capturat |
|
||||
| `vercel` | push branch + `gh pr checks --watch` (timeout 5 min); fail dacă PR checks eșuează |
|
||||
| `db` | verify schema diff (alembic / prisma migrate diff / `psql \\d+ tablename`) |
|
||||
| `docs` | doar typecheck base (Faza 2 e suficient); skip gate dedicat |
|
||||
| `backend` | `/review` pe diff (intern — second pass review pe API contracts, error handling) |
|
||||
| `infra` | `/review` pe diff + manual smoke test al modificărilor (CI config, Dockerfile, etc.) |
|
||||
| _(tags vide)_ | **run-all-gates fallback** — `/review` + `/qa` + `/workflow:simplify` (safe default) |
|
||||
|
||||
Folosește **agent-browser CLI** pentru verificare vizuală. Agent-browser e optimizat pentru agenți AI cu referințe compacte (@e1, @e2) care consumă minim tokeni.
|
||||
**Mecanism**: skill-urile gstack se invocă prin text mention în prompt — Claude (subprocess `claude -p`) le vede ca tool-uri disponibile via `~/.claude/skills/gstack/`.
|
||||
|
||||
#### 6.1 Pornește dev server-ul
|
||||
```bash
|
||||
# Folosește comanda din techStack.commands.start
|
||||
{techStack.commands.start}
|
||||
# Exemplu: npm run dev
|
||||
```
|
||||
**Multi-tag**: rulează gate-uri pentru fiecare tag (ex: `["ui", "backend"]` → atât `/qa` cât și `/review`).
|
||||
|
||||
Așteaptă să pornească (verifică output-ul pentru "ready" sau similar).
|
||||
**Important**: dacă vreun gate eșuează, NU marca `passes=true`. Repară (max 3 fix-uri în iterație) sau lasă pentru iterația următoare (ralph.sh se ocupă de retry counter).
|
||||
|
||||
#### 6.2 Navighează la pagină
|
||||
```bash
|
||||
agent-browser navigate "http://localhost:{techStack.port}"
|
||||
# Exemplu: agent-browser navigate "http://localhost:3000"
|
||||
```
|
||||
### Faza 4: COMMIT + MARK
|
||||
|
||||
#### 6.3 Ia snapshot pentru verificare
|
||||
```bash
|
||||
agent-browser snapshot
|
||||
```
|
||||
|
||||
Snapshot-ul returnează o listă de elemente cu referințe compacte:
|
||||
```
|
||||
@e1: heading "Welcome"
|
||||
@e2: button "Login"
|
||||
@e3: textbox "Email"
|
||||
@e4: textbox "Password"
|
||||
@e5: button "Submit"
|
||||
```
|
||||
|
||||
**Verifică în snapshot:**
|
||||
- Elementele cheie din acceptance criteria există
|
||||
- Textul e corect
|
||||
- Structura paginii e corectă
|
||||
|
||||
#### 6.4 Testează interacțiunile (dacă e cazul)
|
||||
```bash
|
||||
# Click pe un element
|
||||
agent-browser click @e2
|
||||
|
||||
# Fill un input
|
||||
agent-browser fill @e3 "test@example.com"
|
||||
|
||||
# Așteaptă o schimbare
|
||||
agent-browser snapshot # verifică noua stare
|
||||
```
|
||||
|
||||
#### 6.5 Salvează screenshot ca dovadă
|
||||
```bash
|
||||
agent-browser screenshot ./scripts/ralph/screenshots/US-{id}-$(date +%Y%m%d-%H%M%S).png
|
||||
# Exemplu: agent-browser screenshot ./scripts/ralph/screenshots/US-001-20240115-143022.png
|
||||
```
|
||||
|
||||
#### 6.6 Verifică erori
|
||||
```bash
|
||||
# Verifică console pentru erori
|
||||
agent-browser console
|
||||
```
|
||||
|
||||
**IMPORTANT**:
|
||||
- NU marca story-ul complete dacă verificarea vizuală eșuează!
|
||||
- Dacă găsești erori în browser, repară-le înainte de commit
|
||||
- Screenshots sunt salvate în `scripts/ralph/screenshots/` pentru referință
|
||||
|
||||
### 7. Documentare (dacă ai descoperit ceva util)
|
||||
Dacă ai descoperit patterns sau gotchas, actualizează `AGENTS.md` în directorul relevant:
|
||||
- API patterns
|
||||
- Dependențe non-evidente
|
||||
- Convenții de cod
|
||||
- Cum să testezi anumite funcționalități
|
||||
|
||||
### 8. Commit
|
||||
Format commit message:
|
||||
4.1. **Commit** cu mesaj descriptiv:
|
||||
```
|
||||
feat: [Story ID] - [Story Title]
|
||||
|
||||
- ce ai schimbat (1-3 bullets)
|
||||
- gates rulate: typecheck PASS, lint PASS, /qa PASS
|
||||
```
|
||||
|
||||
### 9. Marchează story-ul ca complet
|
||||
**CRITIC**: Actualizează `scripts/ralph/prd.json`:
|
||||
- Setează `passes: true` pentru story-ul implementat
|
||||
- Adaugă note relevante în câmpul `notes`
|
||||
|
||||
### 10. Actualizează progress.txt
|
||||
Adaugă la sfârșitul fișierului `scripts/ralph/progress.txt`:
|
||||
4.2. **Update `prd.json`**:
|
||||
- `passes: true` DOAR DACĂ toate gate-urile relevante au pasat
|
||||
- `notes` populat cu rezultate gate (ex: "qa: ok, design-review: 8/10")
|
||||
|
||||
4.3. **Append `progress.txt`**:
|
||||
```markdown
|
||||
## Iterație: [timestamp]
|
||||
### Story implementat: [ID] - [Title]
|
||||
### Status: Complete
|
||||
### Story implementat: [ID] - [Title] (tags: [ui, backend])
|
||||
### Status: Complete / Partial / Failed
|
||||
|
||||
### Verificări:
|
||||
### Gates rulate:
|
||||
- Typecheck: PASS
|
||||
- Lint: PASS
|
||||
- Tests: PASS/SKIP
|
||||
- Browser check: PASS/N/A
|
||||
- /qa (ui): PASS — screenshot la scripts/ralph/screenshots/...
|
||||
- /review (backend): PASS
|
||||
|
||||
### Learnings:
|
||||
- [Ce ai învățat]
|
||||
- [Patterns descoperite]
|
||||
- [Patterns descoperite, gotchas]
|
||||
|
||||
### Next steps:
|
||||
- [Ce rămâne de făcut]
|
||||
### Next:
|
||||
- [Stories eligibile pentru iterația următoare]
|
||||
---
|
||||
```
|
||||
|
||||
## Reguli importante
|
||||
|
||||
1. **UN SINGUR STORY PE ITERAȚIE** - Nu implementa mai mult de un story
|
||||
2. **TOATE CHECKS TREBUIE SĂ TREACĂ** - Nu face commit cu erori
|
||||
3. **VERIFICARE BROWSER PENTRU UI** - Obligatorie dacă `requiresBrowserCheck: true`
|
||||
4. **ACTUALIZEAZĂ prd.json** - Altfel iterația următoare va repeta munca
|
||||
5. **FII CONCIS** - Nu over-engineer
|
||||
1. **UN SINGUR STORY PE ITERAȚIE** — nu implementa mai mult de un story.
|
||||
2. **DAG STRICT** — nu sări peste `dependsOn` neîmplinite.
|
||||
3. **GATES PE TAGS** — rulează doar ce e relevant; tags vide = run-all-gates fallback.
|
||||
4. **NU MARCA `passes=true` cu gate failed** — altfel ralph.sh nu va relua story-ul.
|
||||
5. **FII CONCIS** — fără over-engineering, fără docs auto-generate dacă story-ul nu cere.
|
||||
|
||||
## Comenzi agent-browser (referință rapidă)
|
||||
## Comenzi agent-browser (referință rapidă pentru gate `ui`)
|
||||
|
||||
```bash
|
||||
# Navigare
|
||||
agent-browser navigate "http://localhost:3000/page"
|
||||
|
||||
# Snapshot (vedere compactă a paginii)
|
||||
agent-browser snapshot
|
||||
|
||||
# Click pe element (folosind ref din snapshot)
|
||||
agent-browser navigate "http://localhost:{techStack.port}"
|
||||
agent-browser snapshot # listă elemente compactă (@e1, @e2...)
|
||||
agent-browser click @e5
|
||||
|
||||
# Fill input
|
||||
agent-browser fill @e3 "value"
|
||||
|
||||
# Screenshot
|
||||
agent-browser screenshot ./path/to/file.png
|
||||
|
||||
# Console logs
|
||||
agent-browser console
|
||||
|
||||
# Așteaptă text
|
||||
agent-browser screenshot ./scripts/ralph/screenshots/US-{id}-$(date +%Y%m%d-%H%M%S).png
|
||||
agent-browser console # erori JS
|
||||
agent-browser wait-for "Loading complete"
|
||||
```
|
||||
|
||||
## Condiție de terminare
|
||||
|
||||
Dacă TOATE stories au `passes: true`, răspunde cu:
|
||||
Dacă TOATE story-urile au `passes: true` (sau combinat cu `failed: true` / `blocked: true` astfel că nimic nu mai e eligibil):
|
||||
|
||||
```
|
||||
<promise>COMPLETE</promise>
|
||||
|
||||
@@ -1,7 +1,13 @@
|
||||
#!/bin/bash
|
||||
# Ralph pentru Claude Code - Loop autonom de agent AI
|
||||
# Ralph pentru Claude Code - Loop autonom de agent AI (W3: smart gates + DAG + rate limit)
|
||||
# Adaptat din Ralph original (snarktank/ralph) pentru Claude Code CLI
|
||||
# Usage: ./ralph.sh [max_iterations] [project_dir]
|
||||
#
|
||||
# Env vars (opționale):
|
||||
# RALPH_MAX_TURNS — --max-turns per iter (default 30)
|
||||
# RALPH_RATE_LIMIT_SLEEP — sleep după rate limit detection (default 1800 = 30min)
|
||||
# RALPH_DAG_HELPER — path la tools/ralph_dag.py (auto-detect default)
|
||||
# RALPH_PYTHON — interpreter Python pentru DAG helper (default python3)
|
||||
|
||||
set -e
|
||||
|
||||
@@ -15,6 +21,24 @@ SCREENSHOTS_DIR="$SCRIPT_DIR/screenshots"
|
||||
LAST_BRANCH_FILE="$SCRIPT_DIR/.last-branch"
|
||||
PROMPT_FILE="$SCRIPT_DIR/prompt.md"
|
||||
|
||||
# W3 config
|
||||
MAX_TURNS=${RALPH_MAX_TURNS:-30}
|
||||
RATE_LIMIT_SLEEP=${RALPH_RATE_LIMIT_SLEEP:-1800}
|
||||
RALPH_PYTHON=${RALPH_PYTHON:-python3}
|
||||
|
||||
# DAG helper auto-detect: prefer co-located cu echo-core; fallback la $SCRIPT_DIR
|
||||
if [ -n "$RALPH_DAG_HELPER" ] && [ -f "$RALPH_DAG_HELPER" ]; then
|
||||
DAG_HELPER="$RALPH_DAG_HELPER"
|
||||
elif [ -f "/home/moltbot/echo-core/tools/ralph_dag.py" ]; then
|
||||
DAG_HELPER="/home/moltbot/echo-core/tools/ralph_dag.py"
|
||||
elif [ -f "/home/moltbot/echo-core-qc/tools/ralph_dag.py" ]; then
|
||||
DAG_HELPER="/home/moltbot/echo-core-qc/tools/ralph_dag.py"
|
||||
elif [ -f "$SCRIPT_DIR/ralph_dag.py" ]; then
|
||||
DAG_HELPER="$SCRIPT_DIR/ralph_dag.py"
|
||||
else
|
||||
DAG_HELPER=""
|
||||
fi
|
||||
|
||||
# Verifică că jq este instalat
|
||||
if ! command -v jq &> /dev/null; then
|
||||
echo "Eroare: jq nu este instalat. Rulează: apt install jq"
|
||||
@@ -132,6 +156,51 @@ check_all_complete() {
|
||||
[ "$incomplete" -eq 0 ]
|
||||
}
|
||||
|
||||
# W3: "nothing eligible" = every story is either passes, failed, or blocked.
check_no_eligible() {
  local pending
  pending=$(jq '[.userStories[] | select(.passes != true and .failed != true and .blocked != true)] | length' "$PRD_FILE" 2>/dev/null || echo "999")
  [ "$pending" -eq 0 ]
}
|
||||
|
||||
# W3: pick the next eligible story via the DAG helper. Prints the story ID, or "" when nothing runs.
dag_next_story() {
  if [ -z "$DAG_HELPER" ]; then
    # Simple fallback (no DAG awareness): first story with passes!=true && failed!=true && blocked!=true, priority asc
    jq -r '[.userStories[] | select(.passes != true and .failed != true and .blocked != true)] | sort_by(.priority) | .[0].id // ""' "$PRD_FILE"
    return
  fi
  "$RALPH_PYTHON" "$DAG_HELPER" next-story "$PRD_FILE" 2>/dev/null || echo ""
}
|
||||
|
||||
# Print the story's new retry counter; prints "0" when the helper is missing or errors.
dag_incr_retry() {
  local story_id="$1"
  [ -z "$DAG_HELPER" ] && { echo "0"; return 0; }
  "$RALPH_PYTHON" "$DAG_HELPER" incr-retry "$PRD_FILE" "$story_id" 2>/dev/null || echo "0"
}
|
||||
|
||||
# Mark <story_id> failed (with <reason>) via the DAG helper; no-op without a helper.
dag_mark_failed() {
  local story_id="$1" why="$2"
  [ -z "$DAG_HELPER" ] && return 0
  "$RALPH_PYTHON" "$DAG_HELPER" mark-failed "$PRD_FILE" "$story_id" "$why" 2>/dev/null || true
}
|
||||
|
||||
# Merge diff-inferred tags into <story_id>'s tags in prd.json; no-op without a helper.
dag_force_tags() {
  local story_id="$1"
  [ -z "$DAG_HELPER" ] && return 0
  "$RALPH_PYTHON" "$DAG_HELPER" force-tags "$PRD_FILE" "$story_id" "$PROJECT_DIR" 2>/dev/null || true
}
|
||||
|
||||
# W3: heuristic rate-limit detection in Claude output — Anthropic exposes no
# dedicated exit code, so we pattern-match the text (may rarely false-positive
# on an unrelated "429" in the output).
is_rate_limited() {
  local claude_output="$1"
  echo "$claude_output" | grep -qiE "rate limit|rate_limit_exceeded|429|too many requests"
}
|
||||
|
||||
# Afișare status inițial
|
||||
echo ""
|
||||
echo "======================================================================="
|
||||
@@ -155,6 +224,9 @@ if check_all_complete; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Tracker pentru rate limit retry (max 1 retry de iterație-rate-limit per rulare)
|
||||
RATE_LIMIT_RETRY_USED=0
|
||||
|
||||
# Loop principal
|
||||
for i in $(seq 1 $MAX_ITERATIONS); do
|
||||
echo ""
|
||||
@@ -162,17 +234,38 @@ for i in $(seq 1 $MAX_ITERATIONS); do
|
||||
echo " Ralph Iterația $i din $MAX_ITERATIONS"
|
||||
echo "==================================================================="
|
||||
|
||||
# W3: alege next story via DAG (propagă blocked dacă vreun dep a eșuat)
|
||||
CURRENT_STORY=$(dag_next_story)
|
||||
if [ -z "$CURRENT_STORY" ]; then
|
||||
echo ""
|
||||
echo "==================================================================="
|
||||
if check_all_complete; then
|
||||
echo " TOATE STORY-URILE DIN PRD SUNT COMPLETE!"
|
||||
exit 0
|
||||
else
|
||||
echo " NICIUN STORY ELIGIBIL (toate fie complete, fie failed, fie blocked)"
|
||||
echo " Stories incomplete:"
|
||||
jq -r '.userStories[] | select(.passes != true) | " - \(.id): \(.title) [failed=\(.failed // false) blocked=\(.blocked // false) retries=\(.retries // 0)]"' "$PRD_FILE"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
# Status curent
|
||||
COMPLETE_NOW=$(jq '[.userStories[] | select(.passes == true)] | length' "$PRD_FILE")
|
||||
NEXT_STORY=$(jq -r '[.userStories[] | select(.passes != true)] | sort_by(.priority) | .[0] | "\(.id): \(.title)"' "$PRD_FILE")
|
||||
NEXT_TITLE=$(jq -r --arg id "$CURRENT_STORY" '.userStories[] | select(.id == $id) | "\(.id): \(.title)"' "$PRD_FILE")
|
||||
STORY_TAGS=$(jq -r --arg id "$CURRENT_STORY" '.userStories[] | select(.id == $id) | (.tags // []) | join(",")' "$PRD_FILE")
|
||||
STORY_RETRIES=$(jq -r --arg id "$CURRENT_STORY" '.userStories[] | select(.id == $id) | (.retries // 0)' "$PRD_FILE")
|
||||
echo " Progress: $COMPLETE_NOW / $TOTAL_STORIES stories complete"
|
||||
echo " Next: $NEXT_STORY"
|
||||
echo " Next: $NEXT_TITLE [tags: ${STORY_TAGS:-<none>}, retries: $STORY_RETRIES]"
|
||||
echo ""
|
||||
|
||||
# Pregătește prompt-ul cu context
|
||||
FULL_PROMPT=$(cat <<EOF
|
||||
# Context pentru această iterație Ralph
|
||||
|
||||
## Story țintă (DAG-eligible):
|
||||
$CURRENT_STORY (tags: ${STORY_TAGS:-<none>})
|
||||
|
||||
## PRD (prd.json):
|
||||
$(cat "$PRD_FILE")
|
||||
|
||||
@@ -188,10 +281,34 @@ EOF
|
||||
LOG_FILE="$SCRIPT_DIR/logs/iteration-$i-$(date +%Y%m%d-%H%M%S).log"
|
||||
mkdir -p "$SCRIPT_DIR/logs"
|
||||
|
||||
# --output-format json avoids streaming mode issues
|
||||
echo "$FULL_PROMPT" | claude -p --dangerously-skip-permissions --output-format json 2>&1 | tee "$LOG_FILE" || true
|
||||
# --output-format json + --max-turns pentru control runtime
|
||||
set +e
|
||||
echo "$FULL_PROMPT" | claude -p \
|
||||
--dangerously-skip-permissions \
|
||||
--output-format json \
|
||||
--max-turns "$MAX_TURNS" \
|
||||
2>&1 | tee "$LOG_FILE"
|
||||
CLAUDE_EXIT=${PIPESTATUS[1]}
|
||||
set -e
|
||||
OUTPUT=$(cat "$LOG_FILE")
|
||||
|
||||
# W3: rate limit detection (max 1 retry per rulare)
|
||||
if is_rate_limited "$OUTPUT" || [ "$CLAUDE_EXIT" = "29" ]; then
|
||||
if [ "$RATE_LIMIT_RETRY_USED" = "0" ]; then
|
||||
echo ""
|
||||
echo " ⏸️ Rate limit detectat. Sleep ${RATE_LIMIT_SLEEP}s, apoi retry o dată."
|
||||
RATE_LIMIT_RETRY_USED=1
|
||||
echo "## Rate limit la iter $i — sleep $RATE_LIMIT_SLEEP" >> "$PROGRESS_FILE"
|
||||
sleep "$RATE_LIMIT_SLEEP"
|
||||
continue # retry aceeași iterație
|
||||
else
|
||||
echo " ❌ Rate limit din nou — abort run, mark $CURRENT_STORY rate_limited"
|
||||
dag_mark_failed "$CURRENT_STORY" "rate_limited"
|
||||
echo "## Rate limit final la iter $i — abort" >> "$PROGRESS_FILE"
|
||||
exit 2
|
||||
fi
|
||||
fi
|
||||
|
||||
# Verifică dacă toate task-urile sunt complete
|
||||
if echo "$OUTPUT" | grep -q "<promise>COMPLETE</promise>"; then
|
||||
echo ""
|
||||
@@ -211,6 +328,23 @@ EOF
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# W3: tag validation post-iter — chiar dacă Opus a marcat docs, dacă diff atinge .vue/.tsx, force ui
|
||||
dag_force_tags "$CURRENT_STORY" >/dev/null 2>&1 || true
|
||||
|
||||
# W3: dacă story-ul curent ÎNCĂ nu trece (passes==false), incrementăm retries
|
||||
STILL_INCOMPLETE=$(jq -r --arg id "$CURRENT_STORY" '.userStories[] | select(.id == $id) | (.passes == true)' "$PRD_FILE")
|
||||
if [ "$STILL_INCOMPLETE" != "true" ]; then
|
||||
NEW_RETRY=$(dag_incr_retry "$CURRENT_STORY")
|
||||
echo " Story $CURRENT_STORY încă incomplet. Retries: $NEW_RETRY/3"
|
||||
if [ "$NEW_RETRY" -ge 3 ] 2>/dev/null; then
|
||||
echo " ❌ $CURRENT_STORY failed: max_retries — sare la următorul"
|
||||
# mark-failed e deja făcut de incr-retry când >=3, dar idempotent o re-aplicăm
|
||||
dag_mark_failed "$CURRENT_STORY" "max_retries"
|
||||
fi
|
||||
else
|
||||
echo " ✅ Story $CURRENT_STORY marcat passes=true în iterația asta."
|
||||
fi
|
||||
|
||||
echo " Iterația $i completă. Continuăm..."
|
||||
sleep 2
|
||||
done
|
||||
|
||||
267
tools/ralph_dag.py
Normal file
267
tools/ralph_dag.py
Normal file
@@ -0,0 +1,267 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Ralph DAG + tag-validation helpers (W3 smart gates).
|
||||
|
||||
Două responsabilități:
|
||||
1. Tag validation heuristic — anti-silent-regression. Forțează tags pe baza
|
||||
diff content (chiar dacă Opus le-a omis). Rulat ÎNAINTE de smart gate dispatch.
|
||||
2. DAG topological sort — alege următorul story eligibil (passes/failed/blocked
|
||||
propagation). Rulat de ralph.sh la începutul fiecărei iterații.
|
||||
|
||||
CLI subcommands (apelate din ralph.sh):
|
||||
|
||||
python3 ralph_dag.py infer-tags <story_id> <project_dir>
|
||||
→ printează tags inferate (newline-separated) pe baza git diff HEAD~1.
|
||||
|
||||
python3 ralph_dag.py next-story <prd.json>
|
||||
→ printează story_id eligibil (DAG-aware) sau exit 1 dacă nimic.
|
||||
|
||||
python3 ralph_dag.py mark-failed <prd.json> <story_id> <reason>
|
||||
→ marchează story.failed=true cu motiv; propagă blocked la dependenți.
|
||||
|
||||
python3 ralph_dag.py incr-retry <prd.json> <story_id>
|
||||
→ +1 retries; dacă >=3, mark failed cu reason="max_retries"; print new count.
|
||||
|
||||
python3 ralph_dag.py force-tags <prd.json> <story_id> <project_dir>
|
||||
→ adaugă tags inferate DIN DIFF în story.tags (idempotent, deduplicat);
|
||||
dacă tags sunt vide după merge → scrie lista rezultată în prd.json și printează "EMPTY" (semnal pentru run-all-gates fallback în ralph.sh).
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
|
||||
|
||||
# Closed set of tags the smart-gates dispatcher understands; anything outside
# this set is dropped by force_include_tags.
VALID_TAGS = {"ui", "db", "vercel", "refactor", "docs", "backend", "infra"}

# Diff-path → tag heuristics. Kept stable for deterministic test output.
UI_PATTERN = re.compile(r'\.(vue|tsx|jsx|html|css|scss|svelte)$', re.IGNORECASE)
DB_MIGRATIONS = re.compile(r'(^|/)migrations?/', re.IGNORECASE)  # any migrations/ path segment
DB_SQL = re.compile(r'\.sql$', re.IGNORECASE)
|
||||
|
||||
|
||||
def infer_tags_from_paths(paths: List[str], has_vercel_json: bool = False) -> List[str]:
    """Infer gate tags from a list of touched file paths plus a vercel.json flag.

    Pure function. Returns a deduplicated list in the stable order
    ui, db, vercel (determinism matters for tests and reproducible diffs).
    """
    # Same heuristics as the module-level UI_PATTERN / DB_MIGRATIONS / DB_SQL.
    ui_re = re.compile(r'\.(vue|tsx|jsx|html|css|scss|svelte)$', re.IGNORECASE)
    migrations_re = re.compile(r'(^|/)migrations?/', re.IGNORECASE)
    sql_re = re.compile(r'\.sql$', re.IGNORECASE)

    found = set()
    for path in paths:
        if not path:  # skip empty entries (e.g. splitlines noise)
            continue
        if ui_re.search(path):
            found.add("ui")
        if migrations_re.search(path) or sql_re.search(path):
            found.add("db")
    if has_vercel_json:
        found.add("vercel")

    # Emit in fixed order; the set only ever contains these three tags.
    return [tag for tag in ("ui", "db", "vercel") if tag in found]
|
||||
|
||||
|
||||
def force_include_tags(existing_tags: List[str], diff_paths: List[str], has_vercel_json: bool) -> List[str]:
    """Union of the story's existing tags with tags inferred from the diff.

    Anti-silent-regression guarantee: even if the planner tagged a story as
    docs-only, a diff touching .vue/.tsx files forces the "ui" tag so /qa
    cannot be silently skipped. Tags outside VALID_TAGS are dropped; existing
    tags keep their position ahead of inferred ones.
    """
    inferred = infer_tags_from_paths(diff_paths, has_vercel_json)
    merged: List[str] = []
    for tag in [*existing_tags, *inferred]:
        # Dedup while preserving first-seen order (tiny lists — linear scan is fine).
        if tag in VALID_TAGS and tag not in merged:
            merged.append(tag)
    return merged
|
||||
|
||||
|
||||
def get_diff_paths(project_dir: Path, ref: str = "HEAD~1") -> List[str]:
    """Return file paths printed by `git diff --name-only <ref>` in project_dir.

    Falls back to diffing against HEAD (uncommitted changes) when the primary
    ref is unusable — useful on a repo's very first commit. Returns an empty
    list when git is unavailable, times out, or both diffs fail.
    """
    def _run_diff(target: str):
        # One place for the subprocess invocation; 10s timeout keeps ralph.sh snappy.
        return subprocess.run(
            ["git", "-C", str(project_dir), "diff", "--name-only", target],
            capture_output=True, text=True, timeout=10,
        )

    try:
        proc = _run_diff(ref)
        if proc.returncode != 0:
            proc = _run_diff("HEAD")
            if proc.returncode != 0:
                return []
        return [line.strip() for line in proc.stdout.splitlines() if line.strip()]
    except (subprocess.TimeoutExpired, FileNotFoundError):
        return []
|
||||
|
||||
|
||||
def topological_eligible(stories: List[dict]) -> Optional[dict]:
    """Pick the first DAG-eligible story, or None when nothing can run.

    Eligible = not passes, not failed, not blocked, and every ID in
    `dependsOn` has passes == True. Stories whose dependency failed (or was
    itself blocked) are marked blocked IN PLACE with failureReason
    "blocked_by:<dep_id>". Ties break by (priority, id) ascending.
    """
    index = {story.get("id"): story for story in stories}

    def _settled(story: dict) -> bool:
        # Already passed / failed / blocked — nothing left to decide for it.
        return bool(story.get("passes") or story.get("failed") or story.get("blocked"))

    # Phase 1: transitively propagate "blocked" from failed/blocked dependencies.
    dirty = True
    while dirty:
        dirty = False
        for story in stories:
            if _settled(story):
                continue
            for dep_id in story.get("dependsOn") or []:
                dep = index.get(dep_id)
                if dep and (dep.get("failed") or dep.get("blocked")):
                    story["blocked"] = True
                    story["failureReason"] = f"blocked_by:{dep_id}"
                    dirty = True
                    break

    # Phase 2: lowest-priority story whose dependencies have all passed.
    runnable = [
        story for story in stories
        if not _settled(story)
        and all(index.get(dep, {}).get("passes") for dep in (story.get("dependsOn") or []))
    ]
    if not runnable:
        return None
    return min(runnable, key=lambda story: (story.get("priority", 999), story.get("id", "")))
|
||||
|
||||
|
||||
def _load_prd(prd_path: Path) -> dict:
|
||||
with open(prd_path, encoding="utf-8") as f:
|
||||
return json.load(f)
|
||||
|
||||
|
||||
def _save_prd(prd_path: Path, data: dict) -> None:
|
||||
"""Atomic write — temp file + rename, evită corruption mid-write."""
|
||||
tmp = prd_path.with_suffix(".json.tmp")
|
||||
with open(tmp, "w", encoding="utf-8") as f:
|
||||
json.dump(data, f, indent=2, ensure_ascii=False)
|
||||
tmp.replace(prd_path)
|
||||
|
||||
|
||||
def cmd_next_story(prd_path: Path) -> int:
    """CLI: print the next DAG-eligible story ID; exit 1 when nothing is runnable."""
    data = _load_prd(prd_path)
    story = topological_eligible(data.get("userStories", []))
    # Always persist: topological_eligible may have propagated blocked flags in place.
    _save_prd(prd_path, data)
    if not story:
        return 1
    print(story.get("id", ""))
    return 0
|
||||
|
||||
|
||||
def cmd_mark_failed(prd_path: Path, story_id: str, reason: str) -> int:
    """CLI: flag *story_id* as failed and propagate blocked to its dependents.

    Exit 1 (with a stderr message) when the story ID is unknown.
    """
    data = _load_prd(prd_path)
    stories = data.get("userStories", [])
    target = next((s for s in stories if s.get("id") == story_id), None)
    if target is None:
        print(f"Story {story_id} not found", file=sys.stderr)
        return 1
    target["failed"] = True
    target["passes"] = False
    target["failureReason"] = reason or "unknown"
    # Re-run the DAG pass purely for its blocked-propagation side effect.
    topological_eligible(stories)
    _save_prd(prd_path, data)
    print(f"failed: {story_id} ({reason})")
    return 0
|
||||
|
||||
|
||||
def cmd_incr_retry(prd_path: Path, story_id: str) -> int:
    """CLI: bump a story's retry counter; at >= 3 retries mark it failed.

    Prints the new counter so ralph.sh can read and display it.
    Exit 1 (with a stderr message) when the story ID is unknown.
    """
    data = _load_prd(prd_path)
    stories = data.get("userStories", [])
    target = next((s for s in stories if s.get("id") == story_id), None)
    if target is None:
        print(f"Story {story_id} not found", file=sys.stderr)
        return 1
    count = int(target.get("retries", 0)) + 1
    target["retries"] = count
    if count >= 3:  # retry budget exhausted → hard-fail; dependents become blocked
        target["failed"] = True
        target["failureReason"] = "max_retries"
    topological_eligible(stories)  # propagate blocked in place
    _save_prd(prd_path, data)
    print(count)
    return 0
|
||||
|
||||
|
||||
def cmd_force_tags(prd_path: Path, story_id: str, project_dir: Path) -> int:
    """Recompute forced tags for *story_id* from the current diff and persist them.

    Prints one tag per line on success; prints "EMPTY" when no tags result.
    Returns 0 on success, 1 when the story id does not exist.
    """
    data = _load_prd(prd_path)
    diff_paths = get_diff_paths(project_dir)
    has_vercel = (project_dir / "vercel.json").exists()

    for story in data.get("userStories", []):
        if story.get("id") != story_id:
            continue
        merged = force_include_tags(story.get("tags") or [], diff_paths, has_vercel)
        story["tags"] = merged
        _save_prd(prd_path, data)
        if merged:
            for tag in merged:
                print(tag)
        else:
            print("EMPTY")  # ralph.sh treats this as the run-all-gates fallback
        return 0

    print(f"Story {story_id} not found", file=sys.stderr)
    return 1
|
||||
|
||||
|
||||
def cmd_infer_tags(story_id: str, project_dir: Path) -> int:
    """Print tags inferred from the current diff WITHOUT modifying prd.json.

    *story_id* is unused here; it is kept so the CLI signature mirrors the
    other per-story commands.  Exit 0 when at least one tag was inferred,
    1 otherwise.
    """
    has_vercel = (project_dir / "vercel.json").exists()
    tags = infer_tags_from_paths(get_diff_paths(project_dir), has_vercel)
    for tag in tags:
        print(tag)
    return 0 if tags else 1
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point: dispatch sys.argv to the cmd_* helpers.

    Exit codes: 0 success, 1 command-level failure (bubbled up from the
    cmd_* helpers), 2 usage error, 3 missing file or invalid prd.json.
    """
    if len(sys.argv) < 2:
        # Usage errors go to stderr, consistent with the unknown-command
        # branch below, so stdout stays machine-parseable for ralph.sh.
        print(__doc__, file=sys.stderr)
        return 2
    cmd = sys.argv[1]
    args = sys.argv[2:]
    try:
        if cmd == "next-story" and len(args) == 1:
            return cmd_next_story(Path(args[0]))
        if cmd == "mark-failed" and len(args) == 3:
            return cmd_mark_failed(Path(args[0]), args[1], args[2])
        if cmd == "incr-retry" and len(args) == 2:
            return cmd_incr_retry(Path(args[0]), args[1])
        if cmd == "force-tags" and len(args) == 3:
            return cmd_force_tags(Path(args[0]), args[1], Path(args[2]))
        if cmd == "infer-tags" and len(args) == 2:
            return cmd_infer_tags(args[0], Path(args[1]))
        print(f"Unknown command: {cmd}", file=sys.stderr)
        print(__doc__, file=sys.stderr)
        return 2
    except FileNotFoundError as exc:
        print(f"File not found: {exc}", file=sys.stderr)
        return 3
    except json.JSONDecodeError as exc:
        print(f"Invalid JSON: {exc}", file=sys.stderr)
        return 3
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
|
||||
@@ -1,13 +1,151 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Ralph PRD Generator - implementare Python a skill-ului ralph-prd
|
||||
Generează PRD și prd.json fără să apeleze Claude Code
|
||||
Generează PRD și prd.json din descriere (heuristic) sau din final-plan.md (Opus).
|
||||
|
||||
Schema extinsă (W3 / smart gates + DAG):
|
||||
- tags[] : "ui" | "db" | "vercel" | "refactor" | "docs" | "backend" | "infra"
|
||||
- dependsOn[] : alte story IDs (DAG topological sort)
|
||||
- acceptanceCriteria: 3-5 criterii verificabile concret
|
||||
- passes/failed/blocked/retries: state pentru ralph.sh loop guard
|
||||
- failureReason : populat când failed=true (ex: "rate_limited", "max_retries")
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
|
||||
|
||||
# Constants pentru smart gates dispatcher
|
||||
VALID_TAGS = {"ui", "db", "vercel", "refactor", "docs", "backend", "infra"}
|
||||
CLAUDE_BIN = os.environ.get("CLAUDE_BIN", "claude")
|
||||
OPUS_TIMEOUT = int(os.environ.get("RALPH_PRD_OPUS_TIMEOUT", "600")) # 10 min
|
||||
|
||||
|
||||
def _normalize_story(story: dict, idx: int = 0) -> dict:
|
||||
"""Asigură că un story dict are toate câmpurile schemei extinse W3."""
|
||||
sid = story.get("id") or f"US-{idx + 1:03d}"
|
||||
title = (story.get("title") or "").strip() or f"Story {sid}"
|
||||
description = (story.get("description") or "").strip() or title
|
||||
|
||||
# Tags: filter la VALID_TAGS, păstrează ordine
|
||||
raw_tags = story.get("tags") or []
|
||||
tags = [t for t in raw_tags if isinstance(t, str) and t in VALID_TAGS]
|
||||
|
||||
# dependsOn: lista de story IDs (string-uri)
|
||||
raw_deps = story.get("dependsOn") or []
|
||||
depends_on = [d for d in raw_deps if isinstance(d, str) and d.strip()]
|
||||
|
||||
# acceptance criteria: cel puțin 1, ideal 3-5
|
||||
raw_ac = story.get("acceptanceCriteria") or []
|
||||
acceptance = [c.strip() for c in raw_ac if isinstance(c, str) and c.strip()]
|
||||
if not acceptance:
|
||||
acceptance = ["Funcționalitatea implementată conform descrierii"]
|
||||
|
||||
return {
|
||||
"id": sid,
|
||||
"title": title,
|
||||
"description": description,
|
||||
"priority": int(story.get("priority") or (idx + 1) * 10),
|
||||
"acceptanceCriteria": acceptance,
|
||||
"tags": tags,
|
||||
"dependsOn": depends_on,
|
||||
"requiresBrowserCheck": bool(story.get("requiresBrowserCheck", "ui" in tags)),
|
||||
"requiresDesignReview": bool(story.get("requiresDesignReview", False)),
|
||||
"passes": bool(story.get("passes", False)),
|
||||
"failed": bool(story.get("failed", False)),
|
||||
"blocked": bool(story.get("blocked", False)),
|
||||
"retries": int(story.get("retries") or 0),
|
||||
"failureReason": story.get("failureReason") or "",
|
||||
"notes": story.get("notes") or "",
|
||||
}
|
||||
|
||||
|
||||
def extract_stories_from_final_plan(final_plan_path: Path) -> Optional[list]:
    """Invoke the Claude CLI (Opus) on final-plan.md and extract user stories.

    Builds an extraction prompt around the plan's full text, runs the CLI as
    a subprocess (honouring CLAUDE_BIN and OPUS_TIMEOUT), then parses the
    JSON it returns into stories normalized via _normalize_story.

    Returns a list of normalized story dicts, or None on any failure
    (missing/unreadable plan file, CLI error or timeout, unparseable output).
    Backward-compat: the caller may fall back to the heuristic generator
    when None is returned.
    """
    if not final_plan_path.exists():
        return None

    try:
        plan_content = final_plan_path.read_text(encoding="utf-8")
    except (OSError, UnicodeDecodeError):
        # Unreadable plan file — treat exactly like a missing one.
        return None

    valid_tags_csv = ", ".join(sorted(VALID_TAGS))
    # Prompt is in Romanian (runtime string — left untouched): it asks the
    # model to extract implementable stories with id/title/description/
    # acceptanceCriteria/tags/dependsOn/priority and to answer with JSON only.
    prompt = (
        f"Citește final-plan.md de mai jos și extrage user stories implementabile autonom de către un agent AI (Ralph).\n\n"
        f"Pentru fiecare story extrage:\n"
        f"- id (format US-001, US-002...)\n"
        f"- title (scurt, imperativ)\n"
        f"- description (1-2 propoziții, ce face story-ul)\n"
        f"- acceptanceCriteria[] (3-5 criterii verificabile concret — comenzi care trebuie să iasă PASS, fișiere create, comportament observabil)\n"
        f"- tags[] (subset din: {valid_tags_csv}; un story poate avea 1-3 tags)\n"
        f"- dependsOn[] (alte story IDs de care depinde — pentru DAG topological sort; goală dacă independent)\n"
        f"- priority (10, 20, 30... în ordinea din plan)\n\n"
        f"Reguli:\n"
        f"- Fiecare story IMPLEMENTABIL — nu task-uri de research sau design (alea s-au făcut deja în plan).\n"
        f"- Tags ghidează Ralph-ul să ruleze gates corecte: ui→/qa Playwright, db→schema diff, vercel→PR checks, refactor→/workflow:simplify.\n"
        f"- dependsOn pentru ordering real (US-002 are nevoie de DB-ul din US-001) — NU pentru tot ce vine după.\n"
        f"- Nu inventa stories peste plan; extrage doar ce e acolo.\n\n"
        f"Răspunde DOAR cu JSON valid (fără markdown fence, fără comentarii) în formatul:\n"
        f'{{"userStories": [{{"id":"US-001","title":"...","description":"...","acceptanceCriteria":["..."],"tags":["..."],"dependsOn":[],"priority":10}}, ...]}}\n\n'
        f"=== FINAL PLAN ===\n{plan_content}\n=== END PLAN ===\n"
    )

    cmd = [
        CLAUDE_BIN, "-p", prompt,
        "--model", "opus",
        "--output-format", "json",
    ]

    try:
        proc = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=OPUS_TIMEOUT,
        )
    except (subprocess.TimeoutExpired, FileNotFoundError) as exc:
        # CLI binary missing or the Opus call ran past OPUS_TIMEOUT.
        print(f"⚠️ Opus extraction failed: {exc}")
        return None

    if proc.returncode != 0:
        print(f"⚠️ Opus exit {proc.returncode}: {proc.stderr[:300]}")
        return None

    # Claude's --output-format json wraps the reply in {"result": "..."};
    # fall back to the raw stdout when it isn't that shape.
    raw_result = proc.stdout
    try:
        wrapper = json.loads(raw_result)
        result_text = wrapper.get("result", raw_result) if isinstance(wrapper, dict) else raw_result
    except json.JSONDecodeError:
        result_text = raw_result

    # Find the first JSON block in the reply (defensive against any text
    # the model emits before/after the JSON).
    match = re.search(r'\{[\s\S]*"userStories"[\s\S]*\}', result_text)
    if not match:
        print(f"⚠️ Niciun JSON cu userStories în output Opus")
        return None

    try:
        parsed = json.loads(match.group(0))
    except json.JSONDecodeError as exc:
        print(f"⚠️ JSON parse error în output Opus: {exc}")
        return None

    raw_stories = parsed.get("userStories") or []
    if not isinstance(raw_stories, list) or not raw_stories:
        return None

    return [_normalize_story(s, i) for i, s in enumerate(raw_stories)]
|
||||
|
||||
|
||||
def detect_project_context(project_dir: Path):
|
||||
@@ -275,23 +413,33 @@ def prd_to_stories(prd_content: str, project_name: str):
|
||||
|
||||
# Detectează dacă necesită browser check (pentru UI)
|
||||
requires_browser = 'ui' in title.lower() or 'interface' in title.lower()
|
||||
# Heuristic tags din titlu
|
||||
tags_inferred = []
|
||||
title_lower = title.lower()
|
||||
if requires_browser or 'ui' in title_lower or 'frontend' in title_lower:
|
||||
tags_inferred.append('ui')
|
||||
if 'database' in title_lower or 'schema' in title_lower or 'migration' in title_lower:
|
||||
tags_inferred.append('db')
|
||||
if 'refactor' in title_lower or 'cleanup' in title_lower:
|
||||
tags_inferred.append('refactor')
|
||||
if 'doc' in title_lower or 'readme' in title_lower:
|
||||
tags_inferred.append('docs')
|
||||
|
||||
story = {
|
||||
story = _normalize_story({
|
||||
"id": story_id,
|
||||
"title": title,
|
||||
"description": f"Ca {user_type}, vreau {want} pentru că {because}",
|
||||
"priority": priority,
|
||||
"acceptanceCriteria": criteria,
|
||||
"tags": tags_inferred,
|
||||
"dependsOn": [],
|
||||
"requiresBrowserCheck": requires_browser,
|
||||
"passes": False,
|
||||
"notes": ""
|
||||
}
|
||||
|
||||
}, idx=int(story_id.split('-')[-1]) - 1 if story_id.startswith("US-") else 0)
|
||||
stories.append(story)
|
||||
|
||||
# Dacă nu găsim stories (regex failed), generăm basic
|
||||
if not stories:
|
||||
stories = [{
|
||||
stories = [_normalize_story({
|
||||
"id": "US-001",
|
||||
"title": "Implementare funcționalitate principală",
|
||||
"description": f"Implementează {project_name}",
|
||||
@@ -301,10 +449,9 @@ def prd_to_stories(prd_content: str, project_name: str):
|
||||
"Tests passing",
|
||||
"Lint + typecheck pass"
|
||||
],
|
||||
"requiresBrowserCheck": False,
|
||||
"passes": False,
|
||||
"notes": ""
|
||||
}]
|
||||
"tags": [],
|
||||
"dependsOn": [],
|
||||
}, idx=0)]
|
||||
|
||||
return stories
|
||||
|
||||
@@ -357,9 +504,22 @@ def detect_tech_stack_commands(project_dir: Path, context: dict):
|
||||
}
|
||||
|
||||
|
||||
def create_prd_and_json(project_name: str, description: str, workspace_dir: Path):
|
||||
def create_prd_and_json(
|
||||
project_name: str,
|
||||
description: str,
|
||||
workspace_dir: Path,
|
||||
final_plan_path: Optional[Path] = None,
|
||||
):
|
||||
"""
|
||||
Generează PRD markdown și prd.json pentru un proiect
|
||||
Generează PRD markdown și prd.json pentru un proiect.
|
||||
|
||||
Args:
|
||||
project_name: slug proiect (folder în workspace_dir)
|
||||
description: descriere scurtă (folosită ca fallback și pentru PRD markdown)
|
||||
workspace_dir: rădăcina workspace (default ~/workspace/)
|
||||
final_plan_path: opțional, calea către final-plan.md produs de planning agent (W2);
|
||||
când e furnizat, user stories sunt extrase prin Claude Opus din plan;
|
||||
când e None, păstrăm comportamentul vechi (heuristic din description).
|
||||
|
||||
Returns:
|
||||
tuple: (prd_file_path, prd_json_path) sau (None, None) dacă eroare
|
||||
@@ -375,8 +535,10 @@ def create_prd_and_json(project_name: str, description: str, workspace_dir: Path
|
||||
if context['stack_type']:
|
||||
print(f" Stack: {context['stack_type']}")
|
||||
print(f" Config: {context['config_file']}")
|
||||
if final_plan_path:
|
||||
print(f" Final plan: {final_plan_path}")
|
||||
|
||||
# Generează PRD markdown
|
||||
# Generează PRD markdown (mereu — folosit pentru read humans)
|
||||
prd_content = generate_prd_markdown(project_name, description, context)
|
||||
|
||||
# Salvează PRD
|
||||
@@ -389,8 +551,20 @@ def create_prd_and_json(project_name: str, description: str, workspace_dir: Path
|
||||
|
||||
print(f"✅ PRD salvat: {prd_file}")
|
||||
|
||||
# Generează prd.json
|
||||
stories = prd_to_stories(prd_content, project_name)
|
||||
# Generează stories — preferă Opus din final-plan.md când disponibil
|
||||
stories = None
|
||||
if final_plan_path is not None:
|
||||
plan = Path(final_plan_path) if not isinstance(final_plan_path, Path) else final_plan_path
|
||||
print(f"🧠 Extrag stories din final-plan.md cu Opus...")
|
||||
stories = extract_stories_from_final_plan(plan)
|
||||
if stories:
|
||||
print(f" ↳ {len(stories)} stories extrase din plan")
|
||||
else:
|
||||
print(f" ↳ Opus extraction eșuat — fallback la heuristic")
|
||||
|
||||
if not stories:
|
||||
stories = prd_to_stories(prd_content, project_name)
|
||||
|
||||
tech_stack = detect_tech_stack_commands(project_dir, context)
|
||||
|
||||
prd_json_data = {
|
||||
@@ -462,17 +636,20 @@ if __name__ == "__main__":
|
||||
import sys
|
||||
|
||||
if len(sys.argv) < 3:
|
||||
print("Usage: python ralph_prd_generator.py PROJECT_NAME 'description'")
|
||||
print("Usage: python ralph_prd_generator.py PROJECT_NAME 'description' [final_plan_path]")
|
||||
sys.exit(1)
|
||||
|
||||
project_name = sys.argv[1]
|
||||
description = sys.argv[2]
|
||||
final_plan_arg = Path(sys.argv[3]) if len(sys.argv) > 3 else None
|
||||
workspace = Path.home() / "workspace"
|
||||
|
||||
print(f"🔄 Generez PRD pentru {project_name}")
|
||||
print("=" * 70)
|
||||
|
||||
prd_file, prd_json = create_prd_and_json(project_name, description, workspace)
|
||||
prd_file, prd_json = create_prd_and_json(
|
||||
project_name, description, workspace, final_plan_path=final_plan_arg
|
||||
)
|
||||
|
||||
if prd_file and prd_json:
|
||||
print("\n" + "=" * 70)
|
||||
|
||||
Reference in New Issue
Block a user