Ecosistem multi-agent complet

- SOUL-base.md partajat pentru toți agenții
- 5 agenți specializați: work, health, growth, sprijin, scout
- Fiecare agent cu SOUL.md, TOOLS.md, USER.md, AGENTS.md proprii
- Symlinks pentru resurse partajate (notes/, kanban/, projects/)
- Tags de domeniu (@work, @health, etc.) în YouTube notes
- Script update_notes_index.py îmbunătățit cu domenii
- HEARTBEAT.md cu verificări periodice
- Grup sprijin pagină și fișe activități
- Cleanup: șters agents/echo/ orfan
This commit is contained in:
@@ -1,7 +1,7 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Regenerates the notes index in notes.html based on files in notes/youtube/
|
||||
Run after adding new notes.
|
||||
Generează index.json pentru notes din fișierele .md
|
||||
Extrage titlu, dată, tags, și domenii (@work, @health, etc.)
|
||||
"""
|
||||
|
||||
import os
|
||||
@@ -9,67 +9,105 @@ import re
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
NOTES_DIR = Path(__file__).parent.parent / 'notes' / 'youtube'
|
||||
NOTES_HTML = Path(__file__).parent.parent / 'kanban' / 'notes.html'
|
||||
NOTES_DIR = Path(__file__).parent.parent / "notes" / "youtube"
|
||||
INDEX_FILE = NOTES_DIR / "index.json"
|
||||
|
||||
# Domenii de agenți
|
||||
VALID_DOMAINS = ['work', 'health', 'growth', 'sprijin', 'scout']
|
||||
|
||||
def extract_metadata(filepath):
|
||||
"""Extract title, date, tags from markdown file."""
|
||||
"""Extrage metadata din fișierul markdown"""
|
||||
with open(filepath, 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
|
||||
# Extract title (first # heading)
|
||||
title_match = re.search(r'^# (.+)$', content, re.MULTILINE)
|
||||
# Extrage titlul (prima linie cu #)
|
||||
title_match = re.search(r'^#\s+(.+)$', content, re.MULTILINE)
|
||||
title = title_match.group(1) if title_match else filepath.stem
|
||||
|
||||
# Extract date from filename (YYYY-MM-DD_...)
|
||||
date_match = re.match(r'(\d{4}-\d{2}-\d{2})', filepath.name)
|
||||
date = date_match.group(1) if date_match else "Unknown"
|
||||
|
||||
# Extract tags
|
||||
tags_match = re.search(r'\*\*Tags?:\*\*\s*(.+)$', content, re.MULTILINE)
|
||||
# Extrage tags (linia cu **Tags:** sau tags:)
|
||||
tags = []
|
||||
domains = []
|
||||
tags_match = re.search(r'\*\*Tags?:\*\*\s*(.+)$|^Tags?:\s*(.+)$', content, re.MULTILINE | re.IGNORECASE)
|
||||
if tags_match:
|
||||
tags = [t.strip().replace('#', '') for t in tags_match.group(1).split(',')]
|
||||
else:
|
||||
# Try to find hashtags
|
||||
tags = re.findall(r'#(\w+)', content)[:5]
|
||||
tags_str = tags_match.group(1) or tags_match.group(2)
|
||||
|
||||
# Extrage domenii (@work, @health, etc.)
|
||||
domain_matches = re.findall(r'@(\w+)', tags_str)
|
||||
domains = [d for d in domain_matches if d in VALID_DOMAINS]
|
||||
|
||||
# Extrage tags normale (#tag) - exclude domeniile
|
||||
all_tags = re.findall(r'#([\w-]+)', tags_str)
|
||||
tags = [t for t in all_tags if t not in VALID_DOMAINS]
|
||||
|
||||
# Extrage data din filename (YYYY-MM-DD_slug.md)
|
||||
date_match = re.match(r'(\d{4}-\d{2}-\d{2})_', filepath.name)
|
||||
date = date_match.group(1) if date_match else ""
|
||||
|
||||
# Extrage video URL
|
||||
video_match = re.search(r'\*\*(?:Video|Link):\*\*\s*(https?://[^\s]+)', content)
|
||||
video_url = video_match.group(1) if video_match else ""
|
||||
|
||||
# Extrage TL;DR (primele 200 caractere)
|
||||
tldr_match = re.search(r'##\s*📋?\s*TL;DR\s*\n+(.+?)(?=\n##|\n---|\Z)', content, re.DOTALL)
|
||||
tldr = ""
|
||||
if tldr_match:
|
||||
tldr = tldr_match.group(1).strip()[:200]
|
||||
if len(tldr_match.group(1).strip()) > 200:
|
||||
tldr += "..."
|
||||
|
||||
return {
|
||||
'file': filepath.name,
|
||||
'title': title[:50], # Truncate long titles
|
||||
'date': date,
|
||||
'tags': tags[:4] # Max 4 tags
|
||||
"file": filepath.name,
|
||||
"title": title,
|
||||
"date": date,
|
||||
"tags": tags,
|
||||
"domains": domains,
|
||||
"video": video_url,
|
||||
"tldr": tldr
|
||||
}
|
||||
|
||||
def generate_index():
    """Scan NOTES_DIR for .md notes and (re)write INDEX_FILE (index.json).

    Each note's record comes from extract_metadata(); notes are counted
    per agent domain, sorted newest-first by date, and written together
    with global stats. Returns the structure that was written.
    """
    notes = []
    # Per-domain note counts, initialised to zero for every known domain.
    domain_stats = {d: 0 for d in VALID_DOMAINS}

    for filepath in sorted(NOTES_DIR.glob("*.md"), reverse=True):
        if filepath.name == 'index.json':
            continue
        try:
            metadata = extract_metadata(filepath)
            notes.append(metadata)

            # Tally domains for the summary stats.
            for d in metadata['domains']:
                domain_stats[d] += 1

            domains_str = ' '.join([f'@{d}' for d in metadata['domains']]) if metadata['domains'] else '(no domain)'
            print(f" + {metadata['title'][:40]}... {domains_str}")
        except Exception as e:
            # Best-effort: report and skip unreadable/malformed notes.
            print(f" ! Error processing {filepath.name}: {e}")

    # Sort by date, newest first (empty dates sort last).
    notes.sort(key=lambda x: x['date'], reverse=True)

    # Wrap the notes list with global metadata for index.json consumers.
    output = {
        "notes": notes,
        "stats": {
            "total": len(notes),
            "by_domain": domain_stats
        },
        "domains": VALID_DOMAINS
    }

    with open(INDEX_FILE, 'w', encoding='utf-8') as f:
        json.dump(output, f, indent=2, ensure_ascii=False)

    print(f"\n✅ Generated {INDEX_FILE} with {len(notes)} notes")
    print(f" Domains: {domain_stats}")
    return output


if __name__ == "__main__":
    print("Scanning notes/youtube/...")
    generate_index()
|
||||
|
||||
Reference in New Issue
Block a user