cleanup: remove clawd/openclaw references, fix permissions, add architecture docs

- Replace all ~/clawd and ~/.clawdbot paths with ~/echo-core equivalents
  in tools (git_commit, ralph_prd_generator, backup_config, lead-gen)
- Update personality files: TOOLS.md repo/paths, AGENTS.md security audit cmd
- Migrate HANDOFF.md architectural decisions to docs/architecture.md
- Tighten credentials/ dir to 700, add to .gitignore
- Add .claude/ and *.pid to .gitignore
- Various adapter, router, and session improvements from prior work

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
MoltBot Service
2026-02-14 21:44:13 +00:00
parent d585c85081
commit 5928077646
35 changed files with 666 additions and 790 deletions

View File

@@ -721,15 +721,33 @@ def create_bot(config: Config) -> discord.Client:
# React to acknowledge receipt
await message.add_reaction("\U0001f440")
# Track how many intermediate messages were sent via callback
sent_count = 0
loop = asyncio.get_event_loop()
def on_text(text_block: str) -> None:
"""Send intermediate Claude text blocks to the channel."""
nonlocal sent_count
chunks = split_message(text_block)
for chunk in chunks:
asyncio.run_coroutine_threadsafe(
message.channel.send(chunk), loop
)
sent_count += 1
try:
async with message.channel.typing():
response, _is_cmd = await asyncio.to_thread(
route_message, channel_id, user_id, text
route_message, channel_id, user_id, text,
on_text=on_text,
)
chunks = split_message(response)
for chunk in chunks:
await message.channel.send(chunk)
# Only send the final combined response if no intermediates
# were delivered (avoids duplicating content).
if sent_count == 0:
chunks = split_message(response)
for chunk in chunks:
await message.channel.send(chunk)
except Exception:
logger.exception("Error processing message from %s", message.author)
await message.channel.send(

View File

@@ -331,14 +331,31 @@ async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) ->
# Show typing indicator
await context.bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
# Track intermediate messages sent via callback
sent_count = 0
loop = asyncio.get_event_loop()
def on_text(text_block: str) -> None:
"""Send intermediate Claude text blocks to the chat."""
nonlocal sent_count
chunks = split_message(text_block)
for chunk in chunks:
asyncio.run_coroutine_threadsafe(
context.bot.send_message(chat_id=chat_id, text=chunk), loop
)
sent_count += 1
try:
response, _is_cmd = await asyncio.to_thread(
route_message, str(chat_id), str(user_id), text
route_message, str(chat_id), str(user_id), text,
on_text=on_text,
)
chunks = split_message(response)
for chunk in chunks:
await message.reply_text(chunk)
# Only send combined response if no intermediates were delivered
if sent_count == 0:
chunks = split_message(response)
for chunk in chunks:
await message.reply_text(chunk)
except Exception:
logger.exception("Error processing Telegram message from %s", user_id)
await message.reply_text("Sorry, something went wrong processing your message.")

View File

@@ -104,6 +104,26 @@ async def send_whatsapp(client: httpx.AsyncClient, to: str, text: str) -> bool:
return False
async def react_whatsapp(
    client: httpx.AsyncClient, to: str, message_id: str, emoji: str,
    *, from_me: bool = False, participant: str | None = None,
) -> bool:
    """React to a WhatsApp message via the bridge.

    Passing an empty *emoji* clears an existing reaction. Best-effort:
    any transport or bridge error is logged at debug level and reported
    as ``False`` rather than raised.
    """
    body: dict = {
        "to": to,
        "id": message_id,
        "emoji": emoji,
        "fromMe": from_me,
    }
    # Group messages need the participant JID to target the reaction.
    if participant:
        body["participant"] = participant
    try:
        resp = await client.post(f"{_bridge_url}/react", json=body, timeout=10)
        if resp.status_code != 200:
            return False
        return resp.json().get("ok", False)
    except Exception as e:
        log.debug("React error: %s", e)
        return False
async def get_bridge_status(client: httpx.AsyncClient) -> dict | None:
"""Get bridge connection status."""
try:
@@ -174,19 +194,53 @@ async def handle_incoming(msg: dict, client: httpx.AsyncClient) -> None:
return
# Identify sender for logging/routing
participant = msg.get("participant") or sender
user_id = participant.split("@")[0]
participant_jid = msg.get("participant") or sender
user_id = participant_jid.split("@")[0]
message_id = msg.get("id")
from_me = msg.get("fromMe", False)
# React with 👀 to acknowledge receipt
if message_id:
await react_whatsapp(
client, sender, message_id, "\U0001f440",
from_me=from_me,
participant=msg.get("participant"),
)
# Route to Claude via router (handles /model and regular messages)
log.info("Message from %s (%s): %.50s", user_id, push_name, text)
# Track intermediate messages sent via callback
sent_count = 0
loop = asyncio.get_event_loop()
def on_text(text_block: str) -> None:
"""Send intermediate Claude text blocks to the sender."""
nonlocal sent_count
asyncio.run_coroutine_threadsafe(
send_whatsapp(client, sender, text_block), loop
)
sent_count += 1
try:
response, _is_cmd = await asyncio.to_thread(
route_message, channel_id, user_id, text
route_message, channel_id, user_id, text,
on_text=on_text,
)
await send_whatsapp(client, sender, response)
# Only send combined response if no intermediates were delivered
if sent_count == 0:
await send_whatsapp(client, sender, response)
except Exception as e:
log.error("Error handling message from %s: %s", user_id, e)
await send_whatsapp(client, sender, "Sorry, an error occurred.")
finally:
# Remove eyes reaction after responding
if message_id:
await react_whatsapp(
client, sender, message_id, "",
from_me=from_me,
participant=msg.get("participant"),
)
# --- Main loop ---
@@ -223,12 +277,12 @@ async def run_whatsapp(config: Config, bridge_url: str = "http://127.0.0.1:8098"
log.info("WhatsApp adapter polling started")
# Polling loop
# Polling loop — concurrent message processing
while _running:
try:
messages = await poll_messages(client)
for msg in messages:
await handle_incoming(msg, client)
asyncio.create_task(handle_incoming(msg, client))
except asyncio.CancelledError:
break
except Exception as e:

View File

@@ -12,9 +12,11 @@ import os
import shutil
import subprocess
import tempfile
import threading
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Callable
logger = logging.getLogger(__name__)
_invoke_log = logging.getLogger("echo-core.invoke")
@@ -31,7 +33,7 @@ _SESSIONS_FILE = SESSIONS_DIR / "active.json"
VALID_MODELS = {"haiku", "sonnet", "opus"}
DEFAULT_MODEL = "sonnet"
DEFAULT_TIMEOUT = 120 # seconds
DEFAULT_TIMEOUT = 300 # seconds
CLAUDE_BIN = os.environ.get("CLAUDE_BIN", "claude")
@@ -156,12 +158,20 @@ def _save_sessions(data: dict) -> None:
raise
def _run_claude(cmd: list[str], timeout: int) -> dict:
def _run_claude(
cmd: list[str],
timeout: int,
on_text: Callable[[str], None] | None = None,
) -> dict:
"""Run a Claude CLI command and return parsed output.
Expects ``--output-format stream-json --verbose``. Parses the newline-
delimited JSON stream, collecting every text block from ``assistant``
messages and metadata from the final ``result`` line.
If *on_text* is provided it is called with each intermediate text block
as soon as it arrives (before the process finishes), enabling real-time
streaming to adapters.
"""
if not shutil.which(CLAUDE_BIN):
raise FileNotFoundError(
@@ -169,59 +179,92 @@ def _run_claude(cmd: list[str], timeout: int) -> dict:
"Install: https://docs.anthropic.com/en/docs/claude-code"
)
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
env=_safe_env(),
cwd=PROJECT_ROOT,
)
# Watchdog thread: kill the process if it exceeds the timeout
timed_out = threading.Event()
def _watchdog():
    """Kill the Claude process if it runs past *timeout* seconds."""
    try:
        proc.wait(timeout=timeout)
        return  # exited within the budget — nothing to do
    except subprocess.TimeoutExpired:
        pass
    # Deadline exceeded: flag it so the reader loop can raise, then
    # terminate the process (it may already be gone — ignore that race).
    timed_out.set()
    try:
        proc.kill()
    except OSError:
        pass
watchdog = threading.Thread(target=_watchdog, daemon=True)
watchdog.start()
# --- Parse stream-json output line by line ---
text_blocks: list[str] = []
result_obj: dict | None = None
intermediate_count = 0
try:
proc = subprocess.run(
cmd,
capture_output=True,
text=True,
timeout=timeout,
env=_safe_env(),
cwd=PROJECT_ROOT,
)
except subprocess.TimeoutExpired:
for line in proc.stdout:
line = line.strip()
if not line:
continue
try:
obj = json.loads(line)
except json.JSONDecodeError:
continue
msg_type = obj.get("type")
if msg_type == "assistant":
message = obj.get("message", {})
for block in message.get("content", []):
if block.get("type") == "text":
text = block.get("text", "").strip()
if text:
text_blocks.append(text)
if on_text:
try:
on_text(text)
intermediate_count += 1
except Exception:
logger.exception("on_text callback error")
elif msg_type == "result":
result_obj = obj
finally:
# Ensure process resources are cleaned up
proc.stdout.close()
try:
proc.wait(timeout=30)
except subprocess.TimeoutExpired:
proc.kill()
proc.wait()
stderr_output = proc.stderr.read()
proc.stderr.close()
if timed_out.is_set():
raise TimeoutError(f"Claude CLI timed out after {timeout}s")
if proc.returncode != 0:
detail = proc.stderr[:500] or proc.stdout[:500]
logger.error("Claude CLI stdout: %s", proc.stdout[:1000])
logger.error("Claude CLI stderr: %s", proc.stderr[:1000])
stdout_tail = "\n".join(text_blocks[-3:]) if text_blocks else ""
detail = stderr_output[:500] or stdout_tail[:500]
logger.error("Claude CLI stderr: %s", stderr_output[:1000])
raise RuntimeError(
f"Claude CLI error (exit {proc.returncode}): {detail}"
)
# --- Parse stream-json output ---
text_blocks: list[str] = []
result_obj: dict | None = None
for line in proc.stdout.splitlines():
line = line.strip()
if not line:
continue
try:
obj = json.loads(line)
except json.JSONDecodeError:
continue
msg_type = obj.get("type")
if msg_type == "assistant":
# Extract text from content blocks
message = obj.get("message", {})
for block in message.get("content", []):
if block.get("type") == "text":
text = block.get("text", "").strip()
if text:
text_blocks.append(text)
elif msg_type == "result":
result_obj = obj
if result_obj is None:
raise RuntimeError(
"Failed to parse Claude CLI output: no result line in stream"
)
# Build a dict compatible with the old json output format
combined_text = "\n\n".join(text_blocks) if text_blocks else result_obj.get("result", "")
return {
@@ -232,6 +275,7 @@ def _run_claude(cmd: list[str], timeout: int) -> dict:
"cost_usd": result_obj.get("cost_usd", 0),
"duration_ms": result_obj.get("duration_ms", 0),
"num_turns": result_obj.get("num_turns", 0),
"intermediate_count": intermediate_count,
}
@@ -273,10 +317,14 @@ def start_session(
message: str,
model: str = DEFAULT_MODEL,
timeout: int = DEFAULT_TIMEOUT,
on_text: Callable[[str], None] | None = None,
) -> tuple[str, str]:
"""Start a new Claude CLI session for a channel.
Returns (response_text, session_id).
If *on_text* is provided, each intermediate Claude text block is passed
to the callback as soon as it arrives.
"""
if model not in VALID_MODELS:
raise ValueError(
@@ -297,7 +345,7 @@ def start_session(
]
_t0 = time.monotonic()
data = _run_claude(cmd, timeout)
data = _run_claude(cmd, timeout, on_text=on_text)
_elapsed_ms = int((time.monotonic() - _t0) * 1000)
for field in ("result", "session_id"):
@@ -342,8 +390,13 @@ def resume_session(
session_id: str,
message: str,
timeout: int = DEFAULT_TIMEOUT,
on_text: Callable[[str], None] | None = None,
) -> str:
"""Resume an existing Claude session by ID. Returns response text."""
"""Resume an existing Claude session by ID. Returns response text.
If *on_text* is provided, each intermediate Claude text block is passed
to the callback as soon as it arrives.
"""
# Find channel/model for logging
sessions = _load_sessions()
_log_channel = "?"
@@ -365,7 +418,7 @@ def resume_session(
]
_t0 = time.monotonic()
data = _run_claude(cmd, timeout)
data = _run_claude(cmd, timeout, on_text=on_text)
_elapsed_ms = int((time.monotonic() - _t0) * 1000)
if not data.get("result"):
@@ -407,13 +460,14 @@ def send_message(
message: str,
model: str = DEFAULT_MODEL,
timeout: int = DEFAULT_TIMEOUT,
on_text: Callable[[str], None] | None = None,
) -> str:
"""High-level convenience: auto start or resume based on channel state."""
session = get_active_session(channel_id)
if session is not None:
return resume_session(session["session_id"], message, timeout)
return resume_session(session["session_id"], message, timeout, on_text=on_text)
response_text, _session_id = start_session(
channel_id, message, model, timeout
channel_id, message, model, timeout, on_text=on_text
)
return response_text

View File

@@ -1,6 +1,8 @@
"""Echo Core message router — routes messages to Claude or commands."""
import logging
from typing import Callable
from src.config import Config
from src.claude_session import (
send_message,
@@ -25,11 +27,20 @@ def _get_config() -> Config:
return _config
def route_message(channel_id: str, user_id: str, text: str, model: str | None = None) -> tuple[str, bool]:
def route_message(
channel_id: str,
user_id: str,
text: str,
model: str | None = None,
on_text: Callable[[str], None] | None = None,
) -> tuple[str, bool]:
"""Route an incoming message. Returns (response_text, is_command).
If text starts with / it's a command (handled here for text-based commands).
Otherwise it goes to Claude via send_message (auto start/resume).
*on_text* — optional callback invoked with each intermediate text block
from Claude, enabling real-time streaming to the adapter.
"""
text = text.strip()
@@ -61,7 +72,7 @@ def route_message(channel_id: str, user_id: str, text: str, model: str | None =
model = (channel_cfg or {}).get("default_model") or _get_config().get("bot.default_model", "sonnet")
try:
response = send_message(channel_id, text, model=model)
response = send_message(channel_id, text, model=model, on_text=on_text)
return response, False
except Exception as e:
log.error("Claude error for channel %s: %s", channel_id, e)