Initial commit - workspace setup

- AGENTS.md, SOUL.md, USER.md, IDENTITY.md
- ANAF monitor (declarații fiscale)
- Kanban board + Notes UI
- Email tools
- Memory system
This commit is contained in:
Echo
2026-01-29 13:11:59 +00:00
commit f9912e0081
52 changed files with 23148 additions and 0 deletions

101
tools/email_check.py Normal file
View File

@@ -0,0 +1,101 @@
#!/usr/bin/env python3
"""
IMAP inbox checker for moltbot@romfast.ro
Returns unread emails as JSON
"""
import imaplib
import email
from email.header import decode_header
import json
import sys
from datetime import datetime
# IMAP Configuration
IMAP_SERVER = "mail.romfast.ro"
IMAP_PORT = 993  # IMAPS (implicit TLS)
IMAP_USER = "moltbot@romfast.ro"
# SECURITY NOTE(review): credential is hardcoded in source and committed to
# the repo — move it to an environment variable or secrets store.
IMAP_PASS = "parola281234"
def decode_mime_header(header):
    """Decode a MIME-encoded (RFC 2047) header value into a plain string.

    Args:
        header: Raw header value; may contain encoded-words, or be None/empty.

    Returns:
        The decoded header as a str; "" when header is falsy.
    """
    if not header:
        return ""
    pieces = []
    for part, encoding in decode_header(header):
        if isinstance(part, bytes):
            # Some senders declare bogus charsets (e.g. "unknown-8bit").
            # errors='replace' only covers bad bytes, not unknown codec
            # names, which raise LookupError — fall back to UTF-8.
            try:
                pieces.append(part.decode(encoding or 'utf-8', errors='replace'))
            except LookupError:
                pieces.append(part.decode('utf-8', errors='replace'))
        else:
            pieces.append(part)
    return ''.join(pieces)
def get_email_body(msg):
    """Extract the plain-text body of an email message.

    For multipart messages, the first text/plain part wins; HTML-only
    messages yield "".

    Args:
        msg: An email.message.Message instance.

    Returns:
        Body text truncated to 2000 characters ("" if none found).
    """
    body = ""
    if msg.is_multipart():
        for part in msg.walk():
            if part.get_content_type() == "text/plain":
                # get_payload(decode=True) returns None for container
                # parts; guard explicitly instead of a bare `except:`
                # that hid every error.
                payload = part.get_payload(decode=True)
                if payload is not None:
                    body = payload.decode('utf-8', errors='replace')
                    break
    else:
        payload = msg.get_payload(decode=True)
        if payload is not None:
            body = payload.decode('utf-8', errors='replace')
    return body[:2000]  # limit body length
def check_inbox(unread_only=True, limit=10):
    """Fetch recent INBOX messages over IMAPS and summarize them.

    Args:
        unread_only: When True, only UNSEEN messages are listed.
        limit: Maximum number of messages to fetch (newest first).

    Returns:
        dict: {"ok": True, "unread_count": int, "emails": [...]} on success,
        or {"ok": False, "error": str} on failure. Each email entry has
        id/from/subject/date/body_preview keys.
    """
    try:
        mail = imaplib.IMAP4_SSL(IMAP_SERVER, IMAP_PORT)
        try:
            mail.login(IMAP_USER, IMAP_PASS)
            mail.select("INBOX")

            criteria = "UNSEEN" if unread_only else "ALL"
            status, messages = mail.search(None, criteria)
            if status != "OK":
                return {"ok": False, "error": "Search failed"}

            email_ids = messages[0].split()[-limit:]  # last N ids

            emails = []
            for eid in reversed(email_ids):  # newest first
                status, msg_data = mail.fetch(eid, "(RFC822)")
                if status != "OK":
                    continue
                msg = email.message_from_bytes(msg_data[0][1])
                emails.append({
                    "id": eid.decode(),
                    "from": decode_mime_header(msg["From"]),
                    "subject": decode_mime_header(msg["Subject"]),
                    "date": msg["Date"],
                    "body_preview": get_email_body(msg)[:500],
                })

            return {
                "ok": True,
                "unread_count": len(emails),
                "emails": emails,
            }
        finally:
            # Always release the connection — the original leaked the
            # socket whenever fetch/parse raised mid-loop.
            try:
                mail.logout()
            except Exception:
                pass
    except Exception as e:
        return {"ok": False, "error": str(e)}
if __name__ == "__main__":
    # Default to unread-only; pass --all to list every message.
    show_all = "--all" in sys.argv
    output = check_inbox(unread_only=not show_all)
    print(json.dumps(output, indent=2, ensure_ascii=False))

58
tools/email_send.py Normal file
View File

@@ -0,0 +1,58 @@
#!/usr/bin/env python3
"""
Simple SMTP email sender for moltbot@romfast.ro
Usage: python3 email_send.py "recipient@email.com" "Subject" "Body text"
"""
import smtplib
import ssl
import sys
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# SMTP Configuration
SMTP_SERVER = "mail.romfast.ro"
SMTP_PORT = 465  # SMTPS (implicit TLS)
SMTP_USER = "moltbot@romfast.ro"
# SECURITY NOTE(review): credential is hardcoded in source and committed to
# the repo — move it to an environment variable or secrets store.
SMTP_PASS = "parola281234"
FROM_NAME = "Echo (Moltbot)"
def send_email(to_email: str, subject: str, body: str, html: bool = False) -> dict:
    """Send one message through the configured SMTP-over-SSL account.

    Args:
        to_email: Recipient address.
        subject: Subject line.
        body: Message body text.
        html: Attach the body as text/html instead of text/plain.

    Returns:
        {"ok": True, "to": ..., "subject": ...} on success,
        {"ok": False, "error": ...} on any failure.
    """
    try:
        message = MIMEMultipart("alternative")
        message["Subject"] = subject
        message["From"] = f"{FROM_NAME} <{SMTP_USER}>"
        message["To"] = to_email

        # Single body part; subtype depends on the html flag.
        subtype = "html" if html else "plain"
        message.attach(MIMEText(body, subtype, "utf-8"))

        ssl_context = ssl.create_default_context()
        with smtplib.SMTP_SSL(SMTP_SERVER, SMTP_PORT, context=ssl_context) as server:
            server.login(SMTP_USER, SMTP_PASS)
            server.sendmail(SMTP_USER, to_email, message.as_string())

        return {"ok": True, "to": to_email, "subject": subject}
    except Exception as e:
        return {"ok": False, "error": str(e)}
if __name__ == "__main__":
    # CLI entry point: <to> <subject> <body> are required positionals.
    if len(sys.argv) < 4:
        print("Usage: python3 email_send.py <to> <subject> <body>")
        sys.exit(1)

    recipient, subject_arg, body_arg = sys.argv[1:4]
    outcome = send_email(recipient, subject_arg, body_arg)

    import json
    print(json.dumps(outcome))

View File

@@ -0,0 +1,75 @@
#!/usr/bin/env python3
"""
Regenerates the notes index in notes.html based on files in notes/youtube/
Run after adding new notes.
"""
import os
import re
import json
from pathlib import Path
# Layout relative to this script (assumed to live in tools/): markdown notes
# under ../notes/youtube, and the HTML page holding the index under ../kanban.
NOTES_DIR = Path(__file__).parent.parent / 'notes' / 'youtube'
NOTES_HTML = Path(__file__).parent.parent / 'kanban' / 'notes.html'
def extract_metadata(filepath):
    """Extract title, date, and tags from a markdown note file.

    Args:
        filepath: pathlib.Path to a markdown file, ideally named
            YYYY-MM-DD_*.md.

    Returns:
        dict with keys 'file', 'title' (truncated to 50 chars), 'date'
        ("Unknown" when not encoded in the filename) and 'tags' (max 4).
    """
    content = filepath.read_text(encoding='utf-8')

    # Title: first level-1 heading, falling back to the file stem.
    title_match = re.search(r'^# (.+)$', content, re.MULTILINE)
    title = title_match.group(1) if title_match else filepath.stem

    # Date: taken from a YYYY-MM-DD filename prefix.
    date_match = re.match(r'(\d{4}-\d{2}-\d{2})', filepath.name)
    date = date_match.group(1) if date_match else "Unknown"

    # Tags: prefer an explicit "**Tags:**" line; otherwise scrape hashtags.
    tags_match = re.search(r'\*\*Tags?:\*\*\s*(.+)$', content, re.MULTILINE)
    if tags_match:
        tags = [t.strip().replace('#', '') for t in tags_match.group(1).split(',')]
        # Drop empty entries left by trailing/double commas (the original
        # emitted "" tags into the index for lines like "**Tags:** a, b,").
        tags = [t for t in tags if t]
    else:
        tags = re.findall(r'#(\w+)', content)[:5]

    return {
        'file': filepath.name,
        'title': title[:50],  # keep index entries compact
        'date': date,
        'tags': tags[:4],     # max 4 tags
    }
def update_index():
    """Scan NOTES_DIR for markdown notes and rewrite the `notesIndex`
    array embedded in NOTES_HTML.

    Prints a summary and returns None. No-op (with a message) when the
    notes directory or the HTML file is missing, or when the
    `const notesIndex = [...]` marker cannot be found.
    """
    if not NOTES_DIR.exists():
        print(f"Notes directory not found: {NOTES_DIR}")
        return
    if not NOTES_HTML.exists():
        print(f"Notes HTML not found: {NOTES_HTML}")
        return

    # Collect metadata for every note, newest first (date-prefixed
    # filenames sort chronologically).
    notes = [extract_metadata(f) for f in sorted(NOTES_DIR.glob('*.md'), reverse=True)]

    html = NOTES_HTML.read_text(encoding='utf-8')

    index_json = json.dumps(notes, indent=12, ensure_ascii=False)
    replacement = f'const notesIndex = {index_json};'

    # BUG FIX: use a callable replacement. With a plain string, re.sub
    # reinterprets any backslashes in the JSON (e.g. escaped quotes in a
    # title) as regex escapes / group references and corrupts the output.
    pattern = r'const notesIndex = \[[\s\S]*?\];'
    new_html, n_subs = re.subn(pattern, lambda _m: replacement, html)
    if n_subs == 0:
        print("Warning: notesIndex marker not found in HTML; nothing updated")
        return

    NOTES_HTML.write_text(new_html, encoding='utf-8')

    print(f"Updated index with {len(notes)} notes:")
    for n in notes:
        print(f" - {n['date']}: {n['title']}")
# Script entry point: regenerate the notes index once.
if __name__ == '__main__':
    update_index()

124
tools/youtube_subs.py Executable file
View File

@@ -0,0 +1,124 @@
#!/usr/bin/env python3
"""
Download YouTube subtitles/transcript for summarization.
Usage: python3 youtube_subs.py <video_url> [language]
"""
import subprocess
import sys
import os
import json
import re
from pathlib import Path
def clean_vtt(content):
    """Flatten WebVTT subtitle text into one deduplicated plain-text string.

    Drops the WEBVTT header/metadata, cue timing lines, bare sequence
    numbers, positioning/markup-only lines and inline HTML tags, then
    joins the remaining unique caption lines with spaces.
    """
    kept = []
    already = set()
    for raw in content.split('\n'):
        stripped = raw.strip()
        # Header/metadata lines emitted at the top of yt-dlp's VTT output.
        if raw.startswith(('WEBVTT', 'Kind:', 'Language:')):
            continue
        # Cue timing lines ("00:00:01.000 --> 00:00:04.000").
        if '-->' in raw:
            continue
        # Positioning/markup-only lines and blank lines.
        if stripped.startswith('<') or not stripped:
            continue
        # Bare cue sequence numbers.
        if re.match(r'^\d+$', stripped):
            continue
        # Strip inline tags (<b>, <c.color>, ...) and dedupe repeated
        # rolling-caption lines.
        text = re.sub(r'<[^>]+>', '', raw).strip()
        if text and text not in already:
            already.add(text)
            kept.append(text)
    return ' '.join(kept)
def get_subtitles(url, lang='en'):
    """Download subtitles for a YouTube video via yt-dlp.

    Tries manual (uploader-provided) subtitles across a list of preferred
    languages first, then auto-generated ones.

    Args:
        url: Video URL.
        lang: Preferred subtitle language code (default 'en').

    Returns:
        (title, text): cleaned transcript text, or (title, None) when no
        subtitles could be retrieved. Progress goes to stderr.
    """
    yt_dlp = os.path.expanduser('~/.local/bin/yt-dlp')
    temp_dir = Path('/tmp/yt_subs')
    temp_dir.mkdir(exist_ok=True)

    # Remove leftovers from previous runs so the globs below only see
    # this video's subtitle files.
    for stale in temp_dir.glob('*'):
        stale.unlink()

    # Fetch video metadata. BUG FIX: `title` was unbound (NameError at the
    # final return) when yt-dlp exited non-zero without raising; also guard
    # duration being None (e.g. live streams) before integer arithmetic.
    title = "Unknown"
    info_cmd = [yt_dlp, '--dump-json', '--no-download', url]
    try:
        result = subprocess.run(info_cmd, capture_output=True, text=True, timeout=30)
        if result.returncode == 0:
            info = json.loads(result.stdout)
            title = info.get('title', 'Unknown')
            duration = info.get('duration') or 0
            print(f"Title: {title}", file=sys.stderr)
            print(f"Duration: {duration//60}:{duration%60:02d}", file=sys.stderr)
    except Exception as e:
        print(f"Could not get video info: {e}", file=sys.stderr)

    lang_preferences = [lang, 'ro', 'en', 'en-US', 'en-GB']

    # Pass 1: manual subtitles, in order of language preference.
    for try_lang in lang_preferences:
        cmd = [
            yt_dlp,
            '--write-subs',
            '--sub-langs', try_lang,
            '--skip-download',
            '-o', str(temp_dir / '%(id)s.%(ext)s'),
            url,
        ]
        try:
            subprocess.run(cmd, capture_output=True, timeout=60)
        except subprocess.TimeoutExpired:
            continue  # try the next language instead of crashing
        for ext in ['vtt', 'srt', 'ass']:
            for sub_file in temp_dir.glob(f'*.{try_lang}*.{ext}'):
                content = sub_file.read_text(encoding='utf-8', errors='replace')
                return title, clean_vtt(content)

    # Pass 2: auto-generated subtitles.
    for try_lang in lang_preferences:
        cmd = [
            yt_dlp,
            '--write-auto-subs',
            '--sub-langs', try_lang,
            '--skip-download',
            '-o', str(temp_dir / '%(id)s.%(ext)s'),
            url,
        ]
        try:
            subprocess.run(cmd, capture_output=True, timeout=60)
        except subprocess.TimeoutExpired:
            continue
        for ext in ['vtt', 'srt', 'ass']:
            for sub_file in temp_dir.glob(f'*.{ext}'):
                text = clean_vtt(sub_file.read_text(encoding='utf-8', errors='replace'))
                if text:
                    return title, text

    return title, None
if __name__ == '__main__':
    # CLI entry point: <video_url> is required, [language] optional.
    if len(sys.argv) < 2:
        print("Usage: python3 youtube_subs.py <video_url> [language]")
        sys.exit(1)

    video_url = sys.argv[1]
    language = sys.argv[2] if len(sys.argv) > 2 else 'en'

    title, transcript = get_subtitles(video_url, language)
    if not transcript:
        print(f"No subtitles found for: {title}", file=sys.stderr)
        sys.exit(1)

    print(f"\n=== {title} ===\n")
    print(transcript)