sync: scission owner/template + brain-template-export + BRAIN_MODE guard + /visualize scope filter + port orphelins fix

This commit is contained in:
2026-03-21 02:34:47 +01:00
parent 78323a0094
commit 2fd53cce8e
93 changed files with 6953 additions and 684 deletions

72
scripts/brain-db-backup.sh Executable file
View File

@@ -0,0 +1,72 @@
#!/usr/bin/env bash
# brain-db-backup.sh — Daily backup of brain.db → dedicated git repo
# Usage: bash scripts/brain-db-backup.sh [backup_dir]
# Cron:  0 4 * * * bash ~/Dev/Brain/scripts/brain-db-backup.sh
#
# Strategy:
#   1. SQLite online-backup API (clean copy, no stale lock)
#   2. Dated commit in the backup repo
#   3. Push to Gitea (silent if no remote configured)
#   4. Retention: keep the RETENTION most recent files (automatic rotation)
set -euo pipefail
BRAIN_DB="${BRAIN_DB:-$HOME/Dev/Brain/brain.db}"
BACKUP_DIR="${1:-$HOME/Dev/Brain/brain-db-backup}"
RETENTION=30
DATE=$(date '+%Y-%m-%d')
BACKUP_FILE="brain-${DATE}.db"
# --- Sanity checks ---
if [[ ! -f "$BRAIN_DB" ]]; then
  echo "❌ brain.db introuvable : $BRAIN_DB" >&2
  exit 1
fi
# --- Init backup repo on first run ---
if [[ ! -d "$BACKUP_DIR" ]]; then
  mkdir -p "$BACKUP_DIR"
  git -C "$BACKUP_DIR" init
  {
    echo "# brain-db-backup"
    echo "Backups journaliers de brain.db — généré par brain-db-backup.sh"
    echo ""
  } > "$BACKUP_DIR/README.md"
  echo "*.db binary" > "$BACKUP_DIR/.gitattributes"
  git -C "$BACKUP_DIR" add .
  git -C "$BACKUP_DIR" commit -m "init: brain-db-backup repo"
  echo "✅ Repo backup initialisé : $BACKUP_DIR"
fi
# --- Backup via the SQLite backup API (consistent copy even if the DB is
# in use). Paths are passed via argv instead of being interpolated into
# the Python source: a path containing a quote would otherwise break (or
# inject into) the script. The quoted heredoc delimiter prevents any
# shell expansion inside the Python code.
python3 - "$BRAIN_DB" "$BACKUP_DIR/$BACKUP_FILE" <<'PYEOF'
import sqlite3, sys
src, dst = sys.argv[1], sys.argv[2]
conn = sqlite3.connect(src)
bkp = sqlite3.connect(dst)
conn.backup(bkp)
bkp.close()
conn.close()
print(f'✅ Backup : {dst}')
PYEOF
# --- Rotation: keep only the RETENTION most recent backups ---
cd "$BACKUP_DIR"
# Filenames are brain-YYYY-MM-DD.db (no whitespace/globs), so sorting via
# `ls -1t` is safe here; `read -r` keeps backslashes literal.
ls -1t brain-*.db 2>/dev/null | tail -n +$((RETENTION + 1)) | while IFS= read -r old; do
  rm -f -- "$old"
  echo "🗑 Rotation : $old supprimé"
done
# --- Commit (skip when the DB is byte-identical to the last backup) ---
git -C "$BACKUP_DIR" add -A
if git -C "$BACKUP_DIR" diff --cached --quiet; then
  echo " Aucun changement — brain.db identique au dernier backup"
  exit 0
fi
git -C "$BACKUP_DIR" commit -m "backup: brain.db ${DATE}"
# --- Push (silent if no remote configured) ---
if git -C "$BACKUP_DIR" remote get-url origin &>/dev/null; then
  git -C "$BACKUP_DIR" push -q && echo "✅ Push Gitea OK" || echo "⚠️ Push échoué (réseau ?)"
else
  echo " Pas de remote — backup local uniquement. Ajouter : git -C $BACKUP_DIR remote add origin <url>"
fi

View File

@@ -49,16 +49,17 @@ if ! python3 -c "import sqlite3" 2>/dev/null; then
exit 1
fi
# --check : brain.db stale si plus vieux que le dernier commit touchant claims/ ou handoffs/
# --check : brain.db stale si plus vieux que le dernier commit touchant handoffs/ ou agents/
# Note: claims/ retiré (ADR-042 — brain.db est la source unique, plus de claims YAML)
if $CHECK_ONLY; then
if [[ ! -f "$DB_PATH" ]]; then
log "STALE: brain.db absent"
exit 2
fi
db_mtime=$(stat -c %Y "$DB_PATH" 2>/dev/null || echo 0)
last_commit_ts=$(git -C "$BRAIN_ROOT" log -1 --format="%ct" -- claims/ handoffs/ BRAIN-INDEX.md 2>/dev/null || echo 0)
last_commit_ts=$(git -C "$BRAIN_ROOT" log -1 --format="%ct" -- handoffs/ agents/ BRAIN-INDEX.md 2>/dev/null || echo 0)
if [[ "$last_commit_ts" -gt "$db_mtime" ]]; then
log "STALE: brain.db ($db_mtime) < dernier commit claims/handoffs ($last_commit_ts)"
log "STALE: brain.db ($db_mtime) < dernier commit handoffs/agents ($last_commit_ts)"
exit 2
fi
log "OK: brain.db à jour"

143
scripts/brain-dev.sh Executable file
View File

@@ -0,0 +1,143 @@
#!/usr/bin/env bash
# brain-dev.sh — Start brain in local dev mode (laptop / offline)
# Usage : bash scripts/brain-dev.sh [--engine] [--ui]
#   --engine : start brain-engine locally on :7700 (uvicorn)
#   --ui     : start brain-ui in dev mode (npm run dev)
#   (no arguments) : start both (engine + ui)
set -euo pipefail
BRAIN_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
BRAIN_UI="$BRAIN_ROOT/brain-ui"
ENGINE_PORT=7700
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
ok() { echo -e "${GREEN}$1${NC}"; }
warn() { echo -e "${YELLOW}⚠️ $1${NC}"; }
info() { echo -e " $1"; }
# ── Parse args ────────────────────────────────────────────────────────────────
START_ENGINE=false
START_UI=false
# No arguments → start both.
if [[ $# -eq 0 ]]; then
  START_ENGINE=true
  START_UI=true
fi
for arg in "$@"; do
  case "$arg" in
    --engine) START_ENGINE=true ;;
    --ui) START_UI=true ;;
    *)
      echo "Usage: bash scripts/brain-dev.sh [--engine] [--ui]"
      echo " --engine : démarre brain-engine sur :$ENGINE_PORT"
      echo " --ui : démarre brain-ui en dev"
      echo " (sans args) : démarre les deux"
      exit 1
      ;;
  esac
done
echo ""
echo "╔══════════════════════════════════════════════╗"
echo "║ brain-dev.sh — mode dev local ║"
echo "╚══════════════════════════════════════════════╝"
echo ""
# ── Preconditions (downgrade the flags instead of aborting) ───────────────────
if $START_ENGINE; then
  if ! command -v python3 &>/dev/null; then
    warn "python3 non trouvé — impossible de démarrer brain-engine"
    START_ENGINE=false
  fi
  if ! command -v uvicorn &>/dev/null && ! python3 -c "import uvicorn" 2>/dev/null; then
    warn "uvicorn non installé — pip3 install uvicorn[standard]"
    START_ENGINE=false
  fi
fi
if $START_UI; then
  if [[ ! -d "$BRAIN_UI" ]]; then
    warn "brain-ui absent ($BRAIN_UI) — --ui ignoré"
    START_UI=false
  elif ! command -v npm &>/dev/null; then
    warn "npm non trouvé — impossible de démarrer brain-ui"
    START_UI=false
  fi
fi
# ── Write .env.local for brain-ui ─────────────────────────────────────────────
if [[ -d "$BRAIN_UI" ]]; then
  if $START_ENGINE; then
    # Local engine available → no mock.
    cat > "$BRAIN_UI/.env.local" << 'EOF'
VITE_USE_MOCK=false
VITE_BRAIN_API=http://localhost:7700
EOF
    ok "brain-ui/.env.local → engine local (:7700)"
  else
    # No engine → mock mode.
    cat > "$BRAIN_UI/.env.local" << 'EOF'
VITE_USE_MOCK=true
VITE_BRAIN_API=
EOF
    ok "brain-ui/.env.local → mode mock (pas de VPS requis)"
  fi
fi
# ── Trap Ctrl+C → kill child processes ────────────────────────────────────────
PIDS=()
cleanup() {
  echo ""
  info "Arrêt en cours..."
  # Guard the expansion: "${PIDS[@]}" on an empty array trips `set -u`
  # on bash < 4.4.
  if (( ${#PIDS[@]} > 0 )); then
    for pid in "${PIDS[@]}"; do
      kill "$pid" 2>/dev/null || true
    done
  fi
  wait 2>/dev/null || true
  ok "Processus arrêtés proprement."
  exit 0
}
trap cleanup INT TERM
# ── Start brain-engine ────────────────────────────────────────────────────────
if $START_ENGINE; then
  info "Démarrage brain-engine sur :$ENGINE_PORT..."
  cd "$BRAIN_ROOT"
  # Process substitution instead of a pipeline: with `cmd 2>&1 | sed &`,
  # $! is sed's PID, so cleanup() killed the log prefixer and left
  # uvicorn running as an orphan. Here $! is uvicorn's own PID.
  BRAIN_PORT=$ENGINE_PORT python3 -m uvicorn brain-engine.server:app \
    --host 0.0.0.0 --port "$ENGINE_PORT" --reload \
    > >(sed 's/^/[engine] /') 2>&1 &
  PIDS+=($!)
  ok "brain-engine démarré (PID ${PIDS[-1]})"
fi
# ── Start brain-ui ────────────────────────────────────────────────────────────
if $START_UI; then
  info "Démarrage brain-ui (npm run dev)..."
  cd "$BRAIN_UI"
  # Same pattern: capture npm's PID, not the prefixer's.
  npm run dev > >(sed 's/^/[ui] /') 2>&1 &
  PIDS+=($!)
  ok "brain-ui démarré (PID ${PIDS[-1]})"
fi
if [[ ${#PIDS[@]} -eq 0 ]]; then
  warn "Aucun processus démarré — vérifier les prérequis ci-dessus."
  exit 1
fi
echo ""
if $START_ENGINE; then
  info "brain-engine : http://localhost:$ENGINE_PORT"
  info " /health : http://localhost:$ENGINE_PORT/health"
fi
if $START_UI; then
  info "brain-ui : http://localhost:5173 (port Vite par défaut)"
fi
echo ""
info "Ctrl+C pour arrêter."
echo ""
# Block until the children exit (or the trap fires).
wait

View File

@@ -1,127 +1,29 @@
#!/usr/bin/env bash
# brain-index-regen.sh — Régénère la table ## Claims dans BRAIN-INDEX.md
# depuis les fichiers claims/sess-*.yml (BSI v3 — source unique de vérité)
#
# Gère les formats :
# v1 : name: + opened: + status:
# v2 : sess_id: + opened_at: + status:
# v3 : + satellite_type + zone (inféré) + result.status
# brain-index-regen.sh — Vérifie l'état des claims dans brain.db
# Post-ADR-042 : ne modifie plus BRAIN-INDEX.md (claims = brain.db source unique)
# Conservé pour compatibilité — les appels existants ne cassent pas.
#
# Usage : bash scripts/brain-index-regen.sh
# Appelé par : session-orchestrator (close sequence step 5)
# helloWorld (boot claim open)
#
# Anti-drift : lecture seule sur claims/*.yml — écriture uniquement sur BRAIN-INDEX.md ## Claims
# Sécurité : aucun secret dans les claims (garanti par secrets-guardian)
# Output : 1 ligne résumé (open/total)
set -euo pipefail
BRAIN_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
CLAIMS_DIR="$BRAIN_ROOT/claims"
INDEX_FILE="$BRAIN_ROOT/BRAIN-INDEX.md"
DB_PATH="$BRAIN_ROOT/brain.db"
if [[ ! -f "$INDEX_FILE" ]]; then
echo "❌ BRAIN-INDEX.md introuvable — chemin : $INDEX_FILE"
if [[ ! -f "$DB_PATH" ]]; then
echo "⚠️ brain.db absent — lancer: bash scripts/bsi-claim.sh init"
exit 1
fi
if [[ ! -d "$CLAIMS_DIR" ]]; then
echo "❌ claims/ introuvable — chemin : $CLAIMS_DIR"
exit 1
fi
# ── Parser tous les claims via Python (gère YAML multi-format proprement) ────
python3 - "$CLAIMS_DIR" "$INDEX_FILE" <<'PYEOF'
import sys, os, re
claims_dir = sys.argv[1]
index_path = sys.argv[2]
rows = []
open_count = 0
for filename in sorted(os.listdir(claims_dir)):
if not filename.startswith('sess-') or not filename.endswith('.yml'):
continue
filepath = os.path.join(claims_dir, filename)
with open(filepath, 'r') as f:
content = f.read()
def extract(pattern, text, default='—'):
m = re.search(pattern, text, re.MULTILINE)
if m:
return m.group(1).strip().strip('"\'')
return default
# Gère v1 (name:) et v2 (sess_id:)
def extract_first(*patterns):
for p in patterns:
m = re.search(p, content, re.MULTILINE)
if m:
return m.group(1).strip().strip('"\'')
return '—'
sess_id = extract_first(r'^sess_id:\s*(.+)', r'^name:\s*(sess-.+)')
scope = extract_first(r'^scope:\s*(.+)')
status = extract_first(r'^status:\s*(.+)')
opened = extract_first(r'^opened_at:\s*(.+)', r'^opened:\s*(.+)')
sat_type = extract_first(r'^satellite_type:\s*(.+)')
theme_br = extract_first(r'^theme_branch:\s*(.+)')
# Inférer zone depuis scope (BSI v3 — ADR-014)
KERNEL_SCOPES = ['agents/', 'profil/', 'scripts/', 'KERNEL.md',
'brain-constitution.md', 'brain-compose.yml']
PERSONAL_SCOPES = ['profil/capital', 'profil/objectifs', 'progression/', 'MYSECRETS']
zone = 'project'
for ks in KERNEL_SCOPES:
if ks in scope:
zone = 'kernel'
break
for ps in PERSONAL_SCOPES:
if ps in scope:
zone = 'personal'
break
# Résultat du close si disponible
result_status = extract(r'^\s+status:\s*(.+)', content)
if result_status in ('open', 'closed', 'stale', '—'):
result_status = '—'
# Indicateur satellite_type
type_display = sat_type if sat_type != '—' else '—'
theme_display = theme_br.replace('theme/', '') if theme_br != '—' else '—'
rows.append(f"| {sess_id} | {scope} | {status} | {opened} | {type_display} | {zone} | {result_status} |")
if status == 'open':
open_count += 1
table_rows = "\n".join(rows)
comment = ("<!-- ⚠️ TABLE GÉNÉRÉE — ne pas éditer manuellement.\n"
" Régénérée par : scripts/brain-index-regen.sh\n"
" Appelée par : session-orchestrator (close) + helloWorld (boot)\n"
" Source unique : claims/sess-*.yml (BSI v3) -->\n")
new_table = (f"{comment}Sessions actives à ce jour :\n\n"
f"| sess_id | scope | status | opened_at | type | zone | result |\n"
f"|---------|-------|--------|-----------|------|------|--------|\n"
f"{table_rows}")
# Lire BRAIN-INDEX.md
with open(index_path, 'r') as f:
content = f.read()
# Remplacer depuis le commentaire HTML (ou "Sessions actives") jusqu'au prochain "---"
# Deux patterns : avec ou sans commentaire généré
pattern = r'(?:<!--.*?-->\s*\n)?Sessions actives à ce jour :.*?(?=\n---)'
if not re.search(pattern, content, flags=re.DOTALL):
print("⚠️ Pattern claims non trouvé dans BRAIN-INDEX.md — pas de modification")
sys.exit(0)
new_content = re.sub(pattern, new_table, content, flags=re.DOTALL)
with open(index_path, 'w') as f:
f.write(new_content)
print(f"✅ BRAIN-INDEX.md régénéré — {open_count} claim(s) open / {len(rows)} total")
PYEOF
python3 -c "
import sqlite3, sys
conn = sqlite3.connect(sys.argv[1])
try:
total = conn.execute('SELECT COUNT(*) FROM claims').fetchone()[0]
opens = conn.execute(\"SELECT COUNT(*) FROM claims WHERE status='open'\").fetchone()[0]
print(f'✅ brain.db — {opens} claim(s) open / {total} total')
except Exception:
print('⚠️ Table claims absente — lancer: bash scripts/bsi-claim.sh init')
conn.close()
" "$DB_PATH"

1087
scripts/brain-launch.sh Executable file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,193 @@
#!/usr/bin/env python3
"""brain-pair client — laptop/new machine side (ADR-041)
Scans LAN for a brain-pair server, sends code, receives config.
Usage: python3 brain-pair-client.py <brain_root> <code>
"""
import json
import os
import socket
import sys
import time
BRAIN_ROOT = sys.argv[1]  # path to the Brain repo root (passed by brain-pair.sh)
CODE = sys.argv[2]  # pairing code typed by the user on this machine
BROADCAST_PORT = 7711  # UDP port on which the server broadcasts its presence
SCAN_TIMEOUT = 30  # seconds to scan for server before giving up
def get_ssh_pubkey():
    """Return the first local SSH public key found, or "" if none exists.

    Keys are probed in preference order: ed25519, RSA, ECDSA.
    """
    candidates = ("id_ed25519.pub", "id_rsa.pub", "id_ecdsa.pub")
    for filename in candidates:
        key_path = os.path.expanduser(f"~/.ssh/{filename}")
        if not os.path.exists(key_path):
            continue
        with open(key_path) as handle:
            return handle.read().strip()
    return ""
def get_local_machine():
    """Machine name from brain-compose.local.yml, hostname as fallback.

    Any failure (pyyaml missing, file absent, malformed YAML) silently
    falls back to socket.gethostname().
    """
    fallback = socket.gethostname()
    try:
        import yaml
        with open(os.path.join(BRAIN_ROOT, "brain-compose.local.yml")) as fh:
            data = yaml.safe_load(fh)
        return data.get("machine", fallback)
    except Exception:
        return fallback
def scan_for_server():
    """Listen on BROADCAST_PORT for a brain-pair UDP announcement.

    Returns (ip, port) of the first server that announces itself, or
    (None, None) after SCAN_TIMEOUT seconds without a valid message.
    Malformed datagrams and receive timeouts are silently skipped.
    """
    print(f"🔍 Scan du LAN pour brain-pair server ({SCAN_TIMEOUT}s)...")
    listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.settimeout(2)
    listener.bind(("0.0.0.0", BROADCAST_PORT))
    deadline = time.time() + SCAN_TIMEOUT
    result = (None, None)
    while time.time() < deadline:
        try:
            payload, _addr = listener.recvfrom(1024)
            announce = json.loads(payload.decode())
            if announce.get("type") != "brain-pair":
                continue
            result = (announce["ip"], announce["port"])
        except Exception:
            # covers socket.timeout, bad JSON and missing ip/port keys
            continue
        print(f" ✅ Serveur trouvé : {result[0]}:{result[1]}")
        break
    listener.close()
    return result
def do_handshake(server_ip, server_port, code, machine, ssh_pubkey):
    """Send our pairing request over TCP and return the decoded reply.

    The request carries the pairing code, our machine name and our SSH
    public key; the server answers with its config (JSON dict).
    """
    print(f"🤝 Handshake avec {server_ip}:{server_port}...")
    channel = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    channel.settimeout(15)
    channel.connect((server_ip, server_port))
    payload = {"code": code, "machine": machine, "ssh_pubkey": ssh_pubkey}
    channel.sendall(json.dumps(payload).encode())
    raw = channel.recv(8192).decode()
    channel.close()
    return json.loads(raw)
def apply_config(response, server_ip):
    """Apply the server's pairing response to brain-compose.local.yml.

    Registers the server as an active peer, injects the brain API key
    into the active instance (creating one if none exists), and records
    the server's SSH host fingerprint in ~/.ssh/known_hosts.

    Returns True on success, False if the server refused the pairing.
    """
    import subprocess
    import yaml
    if response.get("status") != "ok":
        print(f"❌ Pairing refusé : {response.get('msg', 'unknown error')}")
        return False
    server_machine = response["machine"]
    api_key = response.get("api_key")
    engine_port = response.get("brain_engine_port", 7700)
    compose_path = os.path.join(BRAIN_ROOT, "brain-compose.local.yml")
    # Read existing compose, or start a minimal one on first pairing.
    if os.path.exists(compose_path):
        with open(compose_path) as f:
            compose = yaml.safe_load(f) or {}
    else:
        compose = {
            "machine": get_local_machine(),
            "instances": {},
            "kernel_path": BRAIN_ROOT,
        }
    # Register the server as an active peer.
    if "peers" not in compose:
        compose["peers"] = {}
    compose["peers"][server_machine] = {
        "url": f"http://{server_ip}:{engine_port}",
        "active": True,
    }
    # Inject the API key into the active instance (create one if none).
    if api_key:
        instances = compose.get("instances", {})
        active_found = False
        for name, inst in instances.items():
            if inst.get("active"):
                inst["brain_api_key"] = api_key
                active_found = True
                break
        if not active_found:
            machine = compose.get("machine", "unknown")
            instances[machine] = {
                "active": True,
                "brain_name": machine,
                "brain_api_key": api_key,
                "path": BRAIN_ROOT,
            }
        compose["instances"] = instances
    with open(compose_path, "w") as f:
        yaml.dump(compose, f, default_flow_style=False, allow_unicode=True)
    print(f" ✅ Peer {server_machine} ({server_ip}) ajouté")
    if api_key:
        print(f" ✅ Brain API Key injectée dans brain-compose.local.yml")
    print(f" ✅ brain-compose.local.yml mis à jour")
    # Record the server's SSH fingerprint. server_ip comes from the
    # network, so it is passed as an argv element (no shell) to prevent
    # command injection — the previous os.system() call interpolated it
    # into a shell command line.
    try:
        scan = subprocess.run(
            ["ssh-keyscan", "-H", server_ip],
            capture_output=True, text=True, timeout=15,
        )
        if scan.stdout:
            with open(os.path.expanduser("~/.ssh/known_hosts"), "a") as kh:
                kh.write(scan.stdout)
    except Exception:
        pass  # best effort — the original silenced errors via 2>/dev/null
    print(f" ✅ Fingerprint {server_ip} ajoutée à known_hosts")
    return True
def main():
    """CLI entry point: discover the server, handshake, write local config."""
    local_name = get_local_machine()
    pubkey = get_ssh_pubkey()
    print(f"🔗 brain-pair join — machine : {local_name}")
    if not pubkey:
        print(f"⚠️ Aucune clé SSH trouvée — ssh-keygen recommandé")
    print()
    # Step 1: find the pairing server via its UDP broadcast.
    server_ip, server_port = scan_for_server()
    if not server_ip:
        print(f"❌ Aucun serveur brain-pair trouvé sur le LAN")
        print(f" Vérifier : brain-pair.sh start sur la machine source")
        sys.exit(1)
    # Step 2: exchange code + identity for the server's config.
    reply = do_handshake(server_ip, server_port, CODE, local_name, pubkey)
    # Step 3: persist peer / API key locally.
    if not apply_config(reply, server_ip):
        print(f"\n❌ Pairing échoué")
        sys.exit(1)
    print(f"\n✅ Pairing terminé !")
    print(f" Vérifier : bash scripts/bsi-query.sh peers")
    print(f" Secrets : bash scripts/brain-secrets-sync.sh status")
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,197 @@
#!/usr/bin/env python3
"""brain-pair server — desktop side (ADR-041)
Generates a 6-digit code, broadcasts on LAN, waits for client handshake.
Exchanges: API key, SSH pubkey, peer config. Never MYSECRETS.
Usage: python3 brain-pair-server.py <brain_root>
"""
import json
import os
import random
import socket
import sys
import threading
import time
import subprocess
BRAIN_ROOT = sys.argv[1]  # path to the Brain repo root (passed by brain-pair.sh)
PAIR_PORT = 7710  # TCP handshake port
BROADCAST_PORT = 7711  # UDP broadcast port
CODE_TTL = 120  # seconds before the pairing code (and TCP accept) expires
TEST_CODE = os.environ.get("BRAIN_PAIR_TEST_CODE")  # force code for testing
def get_machine_info():
    """Collect local identity: machine name, LAN IP, engine port, API key.

    Reads brain-compose.local.yml; the API key comes from the first
    instance flagged active (None when no instance is active).
    """
    import yaml
    with open(os.path.join(BRAIN_ROOT, "brain-compose.local.yml")) as fh:
        compose = yaml.safe_load(fh)
    api_key = None
    for _name, inst in compose.get("instances", {}).items():
        if inst.get("active"):
            api_key = inst.get("brain_api_key")
            break
    return {
        "machine": compose.get("machine", "unknown"),
        "ip": get_local_ip(),
        "brain_engine_port": 7700,
        "api_key": api_key,
    }
def get_local_ip():
    """Best-effort LAN IP of this machine.

    Connecting a UDP socket "towards" 8.8.8.8 sends no packet — the OS
    merely selects the outbound interface — and getsockname() reads the
    local address it chose. Falls back to 127.0.0.1 when there is no
    route (offline machine) instead of propagating OSError and killing
    the pairing flow.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    except OSError:
        return "127.0.0.1"
    finally:
        s.close()
def broadcast_presence(code, stop_event):
    """Announce this pairing server on the LAN once per second via UDP.

    Runs until stop_event is set. The payload carries only our IP and
    the TCP handshake port — the pairing `code` itself is intentionally
    never broadcast (the parameter is accepted but unused here).
    """
    announcer = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    announcer.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    announcer.settimeout(1)
    payload = json.dumps({
        "type": "brain-pair",
        "ip": get_local_ip(),
        "port": PAIR_PORT,
    }).encode()
    while not stop_event.is_set():
        try:
            announcer.sendto(payload, ("<broadcast>", BROADCAST_PORT))
        except OSError:
            pass  # transient network errors: keep announcing
        time.sleep(1)
    announcer.close()
def handle_client(conn, addr, code, machine_info):
    """Handle one pairing handshake on an accepted TCP connection.

    Protocol: the client sends JSON {code, machine, ssh_pubkey}. On a
    code match we reply with our identity + API key, append the client's
    SSH key to authorized_keys (deduplicated on the key blob) and record
    the client as a peer in brain-compose.local.yml.

    Returns True on success, False on a bad code or any error.
    Always closes `conn`.
    """
    conn.settimeout(30)
    try:
        data = conn.recv(4096).decode()
        request = json.loads(data)
        # Gate: reject immediately on a wrong pairing code.
        if request.get("code") != code:
            conn.sendall(json.dumps({"status": "error", "msg": "Invalid code"}).encode())
            print(f"❌ Code invalide depuis {addr[0]}")
            return False
        client_machine = request.get("machine", "unknown")
        client_ssh_pubkey = request.get("ssh_pubkey", "")
        print(f"✅ Code vérifié — pairing avec {client_machine} ({addr[0]})")
        # Send back everything the client needs to talk to us.
        response = {
            "status": "ok",
            "machine": machine_info["machine"],
            "ip": machine_info["ip"],
            "brain_engine_port": machine_info["brain_engine_port"],
            "api_key": machine_info["api_key"],
        }
        conn.sendall(json.dumps(response).encode())
        # Grant SSH access: append the client's public key unless it is
        # already present in authorized_keys.
        if client_ssh_pubkey:
            ak_path = os.path.expanduser("~/.ssh/authorized_keys")
            comment = f" # brain-pair:{client_machine}"
            key_line = client_ssh_pubkey.strip() + comment + "\n"
            existing = ""
            if os.path.exists(ak_path):
                with open(ak_path) as f:
                    existing = f.read()
            # Dedup on the base64 key blob (field 2). A malformed key
            # with fewer fields used to raise IndexError here and abort
            # the handshake mid-way; fall back to the whole string.
            fields = client_ssh_pubkey.strip().split()
            key_blob = fields[1] if len(fields) > 1 else client_ssh_pubkey.strip()
            if key_blob not in existing:
                with open(ak_path, "a") as f:
                    f.write(key_line)
                print(f" ✅ Clé SSH de {client_machine} ajoutée à authorized_keys")
            else:
                print(f" Clé SSH de {client_machine} déjà présente")
        # Register the client as a peer for future brain-engine calls.
        import yaml
        compose_path = os.path.join(BRAIN_ROOT, "brain-compose.local.yml")
        with open(compose_path) as f:
            compose = yaml.safe_load(f)
        if "peers" not in compose:
            compose["peers"] = {}
        compose["peers"][client_machine] = {
            "url": f"http://{addr[0]}:7700",
            "active": True,
        }
        with open(compose_path, "w") as f:
            yaml.dump(compose, f, default_flow_style=False, allow_unicode=True)
        print(f" ✅ Peer {client_machine} ajouté à brain-compose.local.yml")
        return True
    except Exception as e:
        print(f"❌ Erreur handshake : {e}")
        return False
    finally:
        conn.close()
def main():
    """Run one pairing round: generate a code, broadcast, await handshake.

    The code is generated with the `secrets` module — random.randint is
    a predictable PRNG and this code is the only authentication for the
    pairing. BRAIN_PAIR_TEST_CODE still overrides it for tests. The TCP
    accept times out after CODE_TTL seconds.
    """
    import secrets
    code = TEST_CODE or f"{secrets.randbelow(1000000):06d}"
    machine_info = get_machine_info()
    print(f"🔗 brain-pair — en attente de connexion")
    print(f" Machine : {machine_info['machine']} ({machine_info['ip']})")
    print(f"")
    print(f" Code : {code}")
    print(f"")
    print(f" Sur l'autre machine : brain-pair.sh join {code}")
    print(f" Expire dans {CODE_TTL}s...")
    print()
    # Announce ourselves on the LAN while waiting (daemon thread dies
    # with the process).
    stop_event = threading.Event()
    broadcast_thread = threading.Thread(
        target=broadcast_presence, args=(code, stop_event), daemon=True
    )
    broadcast_thread.start()
    # Single-client TCP listener, bounded by the code TTL.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.settimeout(CODE_TTL)
    server.bind(("0.0.0.0", PAIR_PORT))
    server.listen(1)
    try:
        conn, addr = server.accept()
        success = handle_client(conn, addr, code, machine_info)
        if success:
            print(f"\n✅ Pairing terminé avec succès !")
            print(f" Vérifier : bash scripts/bsi-query.sh peers")
        else:
            print(f"\n❌ Pairing échoué")
    except socket.timeout:
        print(f"\n⏱ Code expiré ({CODE_TTL}s) — relancer brain-pair.sh start")
    finally:
        stop_event.set()
        server.close()
if __name__ == "__main__":
    main()

105
scripts/brain-pair.sh Executable file
View File

@@ -0,0 +1,105 @@
#!/usr/bin/env bash
# brain-pair.sh — Bluetooth-style multi-machine pairing (ADR-041)
#
# Usage :
#   brain-pair.sh start            → generate code, listen on the LAN
#   brain-pair.sh join <code>      → scan LAN, send code, receive config
#   brain-pair.sh list             → paired machines (peers in brain-compose.local.yml)
#   brain-pair.sh revoke <machine> → remove a machine
#
# Security : 6-digit code valid 120s (server CODE_TTL), LAN only,
#            MYSECRETS is never exchanged.
# Tier free : python3 stdlib only (pyyaml required for list/revoke)
set -euo pipefail
BRAIN_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
CMD="${1:-help}"
shift || true
case "$CMD" in
  start)
    python3 "$BRAIN_ROOT/scripts/brain-pair-server.py" "$BRAIN_ROOT"
    ;;
  join)
    CODE="${1:-}"
    if [[ -z "$CODE" ]]; then
      echo "❌ Usage: brain-pair.sh join <code>"
      exit 1
    fi
    python3 "$BRAIN_ROOT/scripts/brain-pair-client.py" "$BRAIN_ROOT" "$CODE"
    ;;
  list)
    # BRAIN_ROOT is passed via argv with a quoted heredoc (like revoke)
    # instead of being interpolated into the Python source, so a path
    # containing a quote cannot break or inject into the script.
    python3 - "$BRAIN_ROOT" <<'PYEOF'
import yaml, sys, os
compose_path = os.path.join(sys.argv[1], "brain-compose.local.yml")
try:
    with open(compose_path) as f:
        c = yaml.safe_load(f)
    peers = c.get('peers', {})
    machine = c.get('machine', 'unknown')
    print(f'Machine locale : {machine}')
    print(f'Peers configurés : {len(peers)}\n')
    for name, info in peers.items():
        status = '✅ active' if info.get('active') else '⬜ inactive'
        url = info.get('url', '—')
        print(f' {name} — {url} — {status}')
    if not peers:
        print(' (aucun peer)')
except FileNotFoundError:
    print('❌ brain-compose.local.yml absent')
PYEOF
    ;;
  revoke)
    MACHINE="${1:-}"
    if [[ -z "$MACHINE" ]]; then
      echo "❌ Usage: brain-pair.sh revoke <machine>"
      exit 1
    fi
    python3 - "$BRAIN_ROOT" "$MACHINE" <<'PYEOF'
import yaml, sys, os, subprocess
brain_root = sys.argv[1]
machine = sys.argv[2]
compose_path = os.path.join(brain_root, "brain-compose.local.yml")
with open(compose_path) as f:
    compose = yaml.safe_load(f)
peers = compose.get("peers", {})
if machine not in peers:
    print(f"⚠️ Peer '{machine}' non trouvé")
    sys.exit(1)
peer_url = peers[machine].get("url", "")
host = peer_url.replace("http://", "").replace("https://", "").split(":")[0]
# Remove from the compose file
del peers[machine]
compose["peers"] = peers
with open(compose_path, "w") as f:
    yaml.dump(compose, f, default_flow_style=False, allow_unicode=True)
# Remove from authorized_keys (lines containing the machine name)
ak_path = os.path.expanduser("~/.ssh/authorized_keys")
if os.path.exists(ak_path):
    with open(ak_path) as f:
        lines = f.readlines()
    filtered = [l for l in lines if machine not in l]
    if len(filtered) < len(lines):
        with open(ak_path, "w") as f:
            f.writelines(filtered)
        print(f"✅ Clé SSH de {machine} retirée de authorized_keys")
print(f"✅ Peer '{machine}' révoqué de brain-compose.local.yml")
PYEOF
    ;;
  help|*)
    echo "brain-pair.sh — Pairing multi-machine (ADR-041)"
    echo ""
    echo "Usage :"
    echo " start → génère code 6 chiffres, écoute sur le LAN (120s)"
    echo " join <code> → scan LAN, envoie code, reçoit config"
    echo " list → machines pairées"
    echo " revoke <machine> → supprime un peer"
    ;;
esac

310
scripts/brain-secrets-sync.sh Executable file
View File

@@ -0,0 +1,310 @@
#!/usr/bin/env bash
# brain-secrets-sync.sh — Registre secrets + sync SSH (ADR-040)
#
# Usage :
# brain-secrets-sync.sh status → compare registre vs MYSECRETS local
# brain-secrets-sync.sh audit → secrets expirés, rotation due, manquants
# brain-secrets-sync.sh sync <peer> → récupère les secrets manquants via SSH
# brain-secrets-sync.sh diff <peer> → compare clés locales vs peer (sans valeurs)
#
# Sécurité :
# - Jamais de valeur affichée — noms de clés uniquement
# - Transport via SSH (chiffré par construction)
# - Gate humain obligatoire avant toute sync
#
# Tier free : python3 + pyyaml (pip install pyyaml si absent)
set -euo pipefail
BRAIN_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
SECRETS_DIR="$HOME/Dev/BrainSecrets"
REGISTRY="$SECRETS_DIR/secrets.yml"
MYSECRETS="$SECRETS_DIR/MYSECRETS"
COMPOSE_LOCAL="$BRAIN_ROOT/brain-compose.local.yml"
CMD="${1:-help}"
PEER="${2:-}"
# ── Vérifications ────────────────────────────────────────────
if [[ ! -f "$REGISTRY" ]]; then
echo "❌ Registre absent : $REGISTRY"
echo " → Créer avec le format ADR-040 (voir profil/decisions/040-*)"
exit 1
fi
if [[ ! -f "$MYSECRETS" ]]; then
echo "❌ MYSECRETS absent : $MYSECRETS"
exit 1
fi
# ── Commandes ────────────────────────────────────────────────
case "$CMD" in
status|audit|diff)
python3 - "$REGISTRY" "$MYSECRETS" "$COMPOSE_LOCAL" "$CMD" "$PEER" <<'PYEOF'
import sys, os
from datetime import datetime, date
registry_path = sys.argv[1]
mysecrets_path = sys.argv[2]
compose_path = sys.argv[3]
cmd = sys.argv[4]
peer = sys.argv[5] if len(sys.argv) > 5 else ""
# Parse YAML sans dépendance lourde (fallback si pyyaml absent)
try:
import yaml
with open(registry_path) as f:
registry = yaml.safe_load(f)
except ImportError:
# Fallback basique — parse les clés du registre
print("⚠️ pyyaml absent — install: pip install pyyaml")
print(" Fallback : comparaison clés MYSECRETS uniquement")
registry = None
# Parse MYSECRETS (KEY=VALUE)
local_keys = set()
with open(mysecrets_path) as f:
for line in f:
line = line.strip()
if line and not line.startswith('#') and '=' in line:
key = line.split('=', 1)[0].strip()
if key:
local_keys.add(key)
# Détecter la machine courante
machine = "unknown"
if os.path.exists(compose_path):
try:
with open(compose_path) as f:
compose = yaml.safe_load(f) if registry else {}
machine = compose.get("machine", "unknown")
except Exception:
pass
if registry is None:
sys.exit(0)
secrets = registry.get("secrets", {})
if cmd == "status":
print(f"📋 Registre : {len(secrets)} secrets | Machine : {machine}")
print(f" MYSECRETS : {len(local_keys)} clés locales\n")
missing = []
present = []
other_machine = []
registry_keys = set(secrets.keys())
extra = local_keys - registry_keys # clés locales absentes du registre
for key, meta in secrets.items():
machines = meta.get("machines", [])
required = meta.get("required", False)
scope = meta.get("scope", "—")
if machine in machines or machine == "unknown":
if key in local_keys:
present.append(key)
else:
tag = "🔴 REQUIRED" if required else "⚪ optional"
missing.append(f" {tag} {key} (scope: {scope})")
elif key in local_keys:
other_machine.append(f" {key} → déclaré pour {machines}")
if missing:
print(f"❌ Manquants ({len(missing)}) :")
for m in missing:
print(m)
else:
print(f"✅ Tous les secrets requis pour {machine} sont présents")
if other_machine:
print(f"\n Clés présentes localement mais assignées à d'autres machines ({len(other_machine)}) :")
for o in other_machine:
print(o)
if extra:
print(f"\n⚠ Clés dans MYSECRETS absentes du registre ({len(extra)}) :")
for k in sorted(extra):
print(f" ? {k} → ajouter dans secrets.yml")
print(f"\n✅ {len(present)} clés présentes et déclarées")
elif cmd == "audit":
today = date.today()
issues = []
for key, meta in secrets.items():
expires = meta.get("expires_at")
rotated = meta.get("rotated_at")
required = meta.get("required", False)
if expires:
try:
exp_date = date.fromisoformat(str(expires))
days_left = (exp_date - today).days
if days_left < 0:
issues.append(f" 🔴 EXPIRÉ : {key} — expiré depuis {-days_left}j")
elif days_left < 30:
issues.append(f" 🟡 EXPIRE BIENTÔT : {key} — {days_left}j restants")
except ValueError:
pass
if rotated:
try:
rot_date = date.fromisoformat(str(rotated))
age = (today - rot_date).days
if age > 180 and required:
issues.append(f" 🟡 ROTATION DUE : {key} — dernière rotation il y a {age}j")
except ValueError:
pass
if issues:
print(f"🔍 Audit — {len(issues)} problème(s) :\n")
for i in issues:
print(i)
else:
print("✅ Audit clean — aucun secret expiré ou en attente de rotation")
# Stats par scope
scopes = {}
for key, meta in secrets.items():
s = meta.get("scope", "unknown")
scopes[s] = scopes.get(s, 0) + 1
print(f"\n📊 {len(secrets)} secrets répartis :")
for s, n in sorted(scopes.items()):
print(f" {s}: {n}")
elif cmd == "diff":
if not peer:
print("❌ Usage: brain-secrets-sync.sh diff <peer>")
print(" Ex: brain-secrets-sync.sh diff laptop")
sys.exit(1)
print(f"📋 Diff registre : {machine} vs {peer}\n")
local_expected = set()
peer_expected = set()
for key, meta in secrets.items():
machines = meta.get("machines", [])
if machine in machines:
local_expected.add(key)
if peer in machines:
peer_expected.add(key)
both = local_expected & peer_expected
only_local = local_expected - peer_expected
only_peer = peer_expected - local_expected
print(f" Communs : {len(both)}")
print(f" {machine} only : {len(only_local)}")
print(f" {peer} only : {len(only_peer)}")
if only_local:
print(f"\n Sur {machine} uniquement :")
for k in sorted(only_local):
print(f" {k}")
if only_peer:
print(f"\n Sur {peer} uniquement :")
for k in sorted(only_peer):
print(f" {k}")
PYEOF
;;
sync)
    # Pull keys that exist in the peer's MYSECRETS but not locally.
    # Key NAMES are listed on screen; VALUES only transit over SSH and are
    # appended straight into the local MYSECRETS — never printed.
    if [[ -z "$PEER" ]]; then
        echo "❌ Usage: brain-secrets-sync.sh sync <peer>"
        echo " Ex: brain-secrets-sync.sh sync desktop"
        echo ""
        echo " Peers connus (brain-compose.local.yml) :"
        grep -A2 "peers:" "$COMPOSE_LOCAL" 2>/dev/null | grep -E "^\s+\w+:" | sed 's/://;s/^ / /' || echo " (aucun peer configuré)"
        exit 1
    fi

    # Resolve the peer's host from brain-compose.local.yml.
    # (Despite its name, PEER_URL holds a bare host/IP, not a URL.)
    PEER_URL=$(python3 -c "
import yaml, sys
with open('$COMPOSE_LOCAL') as f:
    c = yaml.safe_load(f)
peers = c.get('peers', {})
p = peers.get('$PEER', {})
url = p.get('url', '')
if url:
    # Extract host from http://ip:port
    host = url.replace('http://','').replace('https://','').split(':')[0]
    print(host)
" 2>/dev/null || echo "")

    if [[ -z "$PEER_URL" ]]; then
        echo "❌ Peer '$PEER' non trouvé dans brain-compose.local.yml"
        echo " Ajouter sous peers: dans brain-compose.local.yml"
        exit 1
    fi

    echo "🔄 Sync depuis $PEER ($PEER_URL)"
    echo ""
    echo "⚠️ CONFIRMATION REQUISE — cette commande va :"
    echo " 1. Lire les noms de clés sur $PEER via SSH (pas les valeurs)"
    echo " 2. Identifier les clés manquantes localement"
    echo " 3. Copier UNIQUEMENT les clés manquantes via SSH"
    echo ""
    # -r keeps backslashes in the typed answer literal
    read -rp "Continuer ? (oui/non) " confirm
    if [[ "$confirm" != "oui" ]]; then
        echo "Annulé."
        exit 0
    fi

    # Step 1: list key names on the peer (names only, values never leave the wire).
    echo ""
    echo "→ Lecture des clés sur $PEER..."
    PEER_KEYS=$(ssh "$PEER_URL" "grep '^[^#].*=' ~/Dev/BrainSecrets/MYSECRETS 2>/dev/null | cut -d= -f1 | sort" 2>/dev/null || echo "")
    if [[ -z "$PEER_KEYS" ]]; then
        echo "❌ Impossible de lire MYSECRETS sur $PEER"
        echo " Vérifier : ssh $PEER_URL 'test -f ~/Dev/BrainSecrets/MYSECRETS'"
        exit 1
    fi

    # Step 2: keys present on the peer but absent locally (comm requires sorted input).
    LOCAL_KEYS=$(grep "^[^#].*=" "$MYSECRETS" | cut -d= -f1 | sort)
    MISSING=$(comm -23 <(echo "$PEER_KEYS") <(echo "$LOCAL_KEYS"))
    if [[ -z "$MISSING" ]]; then
        echo "✅ Aucune clé manquante — MYSECRETS déjà complet"
        exit 0
    fi
    echo "Clés manquantes localement :"
    echo "$MISSING" | sed 's/^/ /'
    echo ""
    read -rp "Copier ces clés depuis $PEER ? (oui/non) " confirm2
    if [[ "$confirm2" != "oui" ]]; then
        echo "Annulé."
        exit 0
    fi

    # Step 3: copy the missing values over SSH (never displayed).
    # Fix: each transfer is now checked — a failed ssh/grep is reported to
    # stderr instead of being silently counted, and the final count reflects
    # the keys actually appended (the old `wc -l` counted failures too).
    ADDED=0
    for key in $MISSING; do
        if ssh "$PEER_URL" "grep '^${key}=' ~/Dev/BrainSecrets/MYSECRETS" >> "$MYSECRETS" 2>/dev/null; then
            echo " ✓ $key"
            ADDED=$((ADDED + 1))
        else
            echo " ⚠️ $key : copie échouée depuis $PEER" >&2
        fi
    done
    echo ""
    echo "✅ Sync terminée — $ADDED clé(s) ajoutée(s) à MYSECRETS"
    echo " Les valeurs n'ont jamais été affichées."
    ;;
help|*)
    # Fallback arm: explicit "help" or any unknown subcommand → usage summary.
    echo "brain-secrets-sync.sh — Registre secrets + sync SSH (ADR-040)"
    echo ""
    echo "Usage :"
    echo " status → compare registre vs MYSECRETS local"
    echo " audit → secrets expirés, rotation due"
    echo " sync <peer> → récupère les secrets manquants via SSH"
    echo " diff <peer> → compare clés par machine (sans valeurs)"
    echo ""
    echo "Registre : ~/Dev/BrainSecrets/secrets.yml"
    echo "Valeurs : ~/Dev/BrainSecrets/MYSECRETS"
    ;;
esac

View File

@@ -1,151 +1,232 @@
#!/bin/bash
# brain-setup.sh — First boot setup (fresh fork)
# Idempotent — safe à relancer si une étape a échoué.
# brain-setup.sh — Setup complet brain sur une nouvelle machine
# Usage : bash brain-setup.sh [brain_name] [brain_root]
# Ex : bash brain-setup.sh prod-laptop ~/Dev/Brain
#
# Usage : bash scripts/brain-setup.sh
# Ce script est idempotent — safe à relancer si une étape a échoué.
set -euo pipefail
BRAIN_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
CLAUDE_DIR="$HOME/.claude"
# ── Config ──────────────────────────────────────────────────────────────────
GITEA="git@git.tetardtek.com:Tetardtek"
BRAIN_NAME="${1:-prod-laptop}"
BRAIN_ROOT="${2:-$HOME/Dev/Brain}"
ok() { echo "OK $1"; }
warn() { echo "WRN $1"; }
ask() { printf "\n? %s\n> " "$1"; }
REPOS=(
"brain:$BRAIN_ROOT"
"toolkit:$BRAIN_ROOT/toolkit"
"progression-coach:$BRAIN_ROOT/progression"
"brain-agent-review:$BRAIN_ROOT/reviews"
"brain-profil:$BRAIN_ROOT/profil"
"brain-todo:$BRAIN_ROOT/todo"
"brain.wiki:$BRAIN_ROOT/wiki"
)
# brain-ui est dans le monorepo principal (brain-ui/ sous BRAIN_ROOT) — pas un satellite séparé
# ── Couleurs ─────────────────────────────────────────────────────────────────
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'
ok() { echo -e "${GREEN}$1${NC}"; }
warn() { echo -e "${YELLOW}⚠️ $1${NC}"; }
info() { echo -e " $1"; }
echo ""
echo "=== brain-template — First boot setup ==="
echo " Chemin : $BRAIN_ROOT"
echo "╔══════════════════════════════════════════════╗"
echo " brain-setup.sh — nouvelle machine ║"
echo "║ brain_name : $BRAIN_NAME"
echo "║ brain_root : $BRAIN_ROOT"
echo "╚══════════════════════════════════════════════╝"
echo ""
# ETAPE 1 — PATHS.md
echo "--- 1/5 Chemins machine ---"
if grep -q '<BRAIN_ROOT>' "$BRAIN_ROOT/PATHS.md" 2>/dev/null; then
ask "Chemin absolu du brain [Entree = $BRAIN_ROOT]"
read -r brain_path; brain_path="${brain_path:-$BRAIN_ROOT}"
ask "Chemin projets [ex: $HOME/Dev/Projects]"
read -r projects_path; projects_path="${projects_path:-$HOME/Dev/Projects}"
ask "URL Git [ex: git@github.com:alice]"
read -r gitea_url; gitea_url="${gitea_url:-git@github.com:<USERNAME>}"
ask "Username Git"
read -r username; username="${username:-<USERNAME>}"
sed -i \
-e "s|<BRAIN_ROOT>|$brain_path|g" \
-e "s|<PROJECTS_ROOT>|$projects_path|g" \
-e "s|<GITEA_URL>|$gitea_url|g" \
-e "s|<USERNAME>|$username|g" \
-e "s|<HOME>|$HOME|g" \
"$BRAIN_ROOT/PATHS.md"
ok "PATHS.md configure"
else
ok "PATHS.md deja configure"
brain_path="$BRAIN_ROOT"
# ── Étape 0 — SSH key ────────────────────────────────────────────────────────
echo "[ 0/5 ] Vérification SSH key Gitea..."
if ! ssh -T git@git.tetardtek.com -o StrictHostKeyChecking=no 2>&1 | grep -qE "Welcome|Hi there"; then
warn "Clé SSH Gitea non configurée."
info "Créer une clé :"
info " ssh-keygen -t ed25519 -C 'laptop@brain'"
info " cat ~/.ssh/id_ed25519.pub"
info " → Ajouter dans Gitea : Settings > SSH Keys"
echo ""
read -p " Appuie sur Entrée quand la clé est ajoutée dans Gitea..." _
fi
ok "SSH Gitea OK"
# ETAPE 2 — CLAUDE.md global
# ── Étape 1 — Cloner les satellites ──────────────────────────────────────────
echo ""
echo "--- 2/5 CLAUDE.md global ---"
CLAUDE_MD="$CLAUDE_DIR/CLAUDE.md"
brain_name="prod"
if [ ! -f "$CLAUDE_MD" ]; then
ask "Nom de cette instance ? [prod / dev / laptop]"
read -r brain_name; brain_name="${brain_name:-prod}"
mkdir -p "$CLAUDE_DIR"
cat > "$CLAUDE_MD" << EOF
# CLAUDE.md
echo "[ 1/5 ] Clonage des satellites..."
for entry in "${REPOS[@]}"; do
repo="${entry%%:*}"
dest="${entry#*:}"
dest="${dest/#\~/$HOME}"
brain_root: ${brain_path:-$BRAIN_ROOT}
brain_name: $brain_name
## Bootstrap
0. ${brain_path:-$BRAIN_ROOT}/PATHS.md
1. ${brain_path:-$BRAIN_ROOT}/profil/collaboration.md
2. ${brain_path:-$BRAIN_ROOT}/agents/coach.md
3. ${brain_path:-$BRAIN_ROOT}/agents/helloWorld.md
helloWorld prend le relais.
EOF
ok "~/.claude/CLAUDE.md cree (brain_name: $brain_name)"
else
ok "~/.claude/CLAUDE.md existe"
brain_name=$(grep 'brain_name:' "$CLAUDE_MD" | sed 's/.*: *//' | tr -d ' ' | head -1 || echo "prod")
fi
# ETAPE 3 — brain-compose.local.yml
echo ""
echo "--- 3/5 brain-compose.local.yml ---"
LOCAL="$BRAIN_ROOT/brain-compose.local.yml"
tier="free"
if [ ! -f "$LOCAL" ]; then
ask "Tier ? [free / pro / full]"
read -r tier; tier="${tier:-free}"
api_key=""
if [ "$tier" != "free" ]; then
ask "Cle API"
read -r api_key
fi
cat > "$LOCAL" << EOF
brain_name: $brain_name
kernel_path: ${brain_path:-$BRAIN_ROOT}
tier: $tier
$([ -n "${api_key:-}" ] && echo "api_key: $api_key" || echo "# api_key: (tier free)")
instances:
$brain_name:
path: ${brain_path:-$BRAIN_ROOT}
brain_name: $brain_name
EOF
ok "brain-compose.local.yml cree (tier: $tier)"
else
ok "brain-compose.local.yml existe"
fi
# ETAPE 3b — collaboration.md
echo ""
echo "--- 3b/5 Profil collaboration ---"
COLLAB="$BRAIN_ROOT/profil/collaboration.md"
COLLAB_EX="$BRAIN_ROOT/profil/collaboration.md.example"
if [ ! -f "$COLLAB" ] && [ -f "$COLLAB_EX" ]; then
cp "$COLLAB_EX" "$COLLAB"
ok "profil/collaboration.md cree depuis .example — a personnaliser"
else
ok "profil/collaboration.md existe"
fi
# ETAPE 4 — Git remote
echo ""
echo "--- 4/5 Git remote ---"
current_origin=$(git -C "$BRAIN_ROOT" remote get-url origin 2>/dev/null || echo "")
if echo "$current_origin" | grep -q "brain-template"; then
ask "URL de TON repo ? (skip pour ignorer)"
read -r new_remote
if [ "$new_remote" != "skip" ] && [ -n "$new_remote" ]; then
git -C "$BRAIN_ROOT" remote set-url origin "$new_remote"
git -C "$BRAIN_ROOT" remote add upstream "$current_origin" 2>/dev/null || true
ok "origin -> $new_remote / upstream -> brain-template"
if [[ -d "$dest/.git" ]]; then
info "$repo → déjà cloné ($dest) — git pull..."
git -C "$dest" pull --ff-only 2>/dev/null || warn "$repo : pull échoué (conflits ?) — vérifier manuellement"
else
warn "Remote non modifie"
mkdir -p "$(dirname "$dest")"
git clone "$GITEA/$repo.git" "$dest"
ok "$repo$dest"
fi
else
ok "Remote : $current_origin"
done
ok "Tous les satellites clonés"
# ── Étape 2 — CLAUDE.md ──────────────────────────────────────────────────────
echo ""
echo "[ 2/5 ] Configuration CLAUDE.md..."
CLAUDE_TARGET="$HOME/.claude/CLAUDE.md"
CLAUDE_EXAMPLE="$BRAIN_ROOT/profil/CLAUDE.md.example"
mkdir -p "$HOME/.claude"
if [[ -f "$CLAUDE_TARGET" ]]; then
warn "~/.claude/CLAUDE.md existe déjà — backup → CLAUDE.md.bak"
cp "$CLAUDE_TARGET" "$CLAUDE_TARGET.bak"
fi
# ETAPE 5 — Validation
echo ""
echo "--- 5/5 Validation ---"
bash "$BRAIN_ROOT/scripts/kernel-isolation-check.sh" 2>&1 | tail -2
cp "$CLAUDE_EXAMPLE" "$CLAUDE_TARGET"
sed -i "s|<BRAIN_ROOT>|$BRAIN_ROOT|g" "$CLAUDE_TARGET"
sed -i "s|<BRAIN_NAME>|$BRAIN_NAME|g" "$CLAUDE_TARGET"
ok "~/.claude/CLAUDE.md configuré (brain_name=$BRAIN_NAME, brain_root=$BRAIN_ROOT)"
# ── Étape 3 — brain-compose.local.yml ────────────────────────────────────────
echo ""
echo "=== Setup termine ==="
echo "[ 3/5 ] brain-compose.local.yml..."
LOCAL_COMPOSE="$BRAIN_ROOT/brain-compose.local.yml"
KERNEL_VERSION=$(grep '^version:' "$BRAIN_ROOT/brain-compose.yml" | awk '{print $2}' | tr -d '"')
if [[ -f "$LOCAL_COMPOSE" ]]; then
warn "brain-compose.local.yml existe déjà — skip"
else
cat > "$LOCAL_COMPOSE" << EOF
# brain-compose.local.yml — Registre machine ($BRAIN_NAME)
# NON VERSIONNÉ — gitignored.
kernel_path: $BRAIN_ROOT
kernel_version: "$KERNEL_VERSION"
last_kernel_sync: "$(date +%Y-%m-%d)"
machine: $BRAIN_NAME
write_mode: readonly_kernel # nouvelle machine = jamais kernel writer
instances:
$BRAIN_NAME:
path: $BRAIN_ROOT
brain_name: $BRAIN_NAME
feature_set: full
mode: prod
docs_fetch: ask
config_status: hydrated
active: true
EOF
ok "brain-compose.local.yml créé"
fi
# ── Lock kernel push (nouvelle machine = readonly) ────────────────────────────
git -C "$BRAIN_ROOT" remote set-url --push origin no_push
ok "Kernel push lockée (write_mode: readonly_kernel)"
# ── Étape 3.5 — Brain API Key (optionnel) ────────────────────────────────────
echo ""
echo " brain_root : ${brain_path:-$BRAIN_ROOT}"
echo " tier : ${tier:-free}"
echo "[ 3.5/5 ] Brain API Key (optionnel)..."
info "Obtenir une clé : contacter le mainteneur du brain (tier free = aucune clé requise)"
info "Format attendu : bk_live_<32chars> (prod) ou bk_test_<32chars> (dev)"
echo ""
echo " Ouvre Claude Code dans ce dossier."
echo " Il t'attend."
read -rp " Brain API Key (Entrée pour passer, tier free) : " api_key
if [[ -n "$api_key" ]]; then
if [[ ! "$api_key" =~ ^bk_(live|test)_ ]]; then
warn "Format invalide — clé ignorée (attendu : bk_live_... ou bk_test_...)"
else
sed -i "s|^brain_api_key:.*|brain_api_key: $api_key|" "$BRAIN_ROOT/brain-compose.yml"
ok "Clé enregistrée dans brain-compose.yml"
info "Le key-guardian validera au prochain boot (timeout 3s, grace 72h si VPS down)."
fi
else
info "Tier free — aucune clé configurée."
fi
# ── Étape 4 — MYSECRETS ──────────────────────────────────────────────────────
echo ""
echo "[ 4/5 ] MYSECRETS..."
MYSECRETS="$BRAIN_ROOT/MYSECRETS"
if [[ -f "$MYSECRETS" ]]; then
ok "MYSECRETS présent"
else
warn "MYSECRETS absent — jamais versionné."
info ""
info "Options pour le récupérer :"
info " A) Copie sécurisée depuis le desktop :"
info " scp tetardtek@<desktop-ip>:~/Dev/Brain/MYSECRETS $MYSECRETS"
info ""
info " B) Recréer manuellement :"
info " cp $BRAIN_ROOT/MYSECRETS.example $MYSECRETS (si le fichier exemple existe)"
info " → Remplir les valeurs manuellement"
info ""
warn "Le brain fonctionne sans MYSECRETS mais les sessions secrets seront bloquées."
fi
# ── Étape 5 — Claude Code ────────────────────────────────────────────────────
echo ""
echo "[ 5/5 ] Claude Code..."
if command -v claude &>/dev/null; then
ok "Claude Code installé ($(claude --version 2>/dev/null || echo 'version inconnue'))"
else
warn "Claude Code non installé."
info " npm install -g @anthropic-ai/claude-code"
info " ou : https://claude.ai/code"
fi
# ── Étape 5.5 — Node.js ──────────────────────────────────────────────────────
echo ""
echo "[ 5.5 ] Node.js..."
if command -v node &>/dev/null && command -v npm &>/dev/null; then
ok "Node.js $(node --version) / npm $(npm --version)"
else
warn "Node.js ou npm absent."
info " Option A — nvm (recommandé) :"
info " curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash"
info " nvm install --lts"
info " Option B — apt :"
info " sudo apt install nodejs npm"
fi
# ── Étape 5.75 — Python3 + pip + brain-engine deps ───────────────────────────
echo ""
echo "[ 5.75 ] Python3 + brain-engine..."
if ! command -v python3 &>/dev/null; then
warn "python3 absent — installer via : sudo apt install python3 python3-pip"
elif ! command -v pip3 &>/dev/null; then
warn "pip3 absent — installer via : sudo apt install python3-pip"
else
ok "Python $(python3 --version 2>&1 | awk '{print $2}') / pip $(pip3 --version 2>&1 | awk '{print $2}')"
REQUIREMENTS="$BRAIN_ROOT/brain-engine/requirements.txt"
if [[ -f "$REQUIREMENTS" ]]; then
info "Installation des dépendances brain-engine..."
pip3 install -r "$REQUIREMENTS" --break-system-packages --quiet && ok "brain-engine deps OK" || warn "pip3 install a échoué — vérifier manuellement"
else
warn "brain-engine/requirements.txt absent — skip pip install"
fi
fi
# ── Résumé ────────────────────────────────────────────────────────────────────
echo ""
echo "╔══════════════════════════════════════════════╗"
echo "║ Setup terminé ║"
echo "╚══════════════════════════════════════════════╝"
echo ""
echo " brain_name : $BRAIN_NAME"
echo " brain_root : $BRAIN_ROOT"
echo ""
echo " Modes de démarrage :"
echo " → Dev laptop (mock, pas de VPS) :"
echo " bash $BRAIN_ROOT/scripts/brain-dev.sh"
echo " → Dev laptop + engine local :"
echo " bash $BRAIN_ROOT/scripts/brain-dev.sh --engine"
echo " → Session Claude Code :"
echo " Ouvrir Claude Code dans $BRAIN_ROOT"
echo " Le brain se boot automatiquement via CLAUDE.md"
echo ""
warn "Si MYSECRETS est absent : le remplir avant la première session work."
echo ""
echo " (Il va te poser une derniere question.)"

89
scripts/brain-start-laptop.sh Executable file
View File

@@ -0,0 +1,89 @@
#!/usr/bin/env bash
# brain-start-laptop.sh — Démarre l'environnement brain sur le laptop
# Lancé après un reboot ou en début de session.
#
# Usage : bash scripts/brain-start-laptop.sh
#
# Lance :
# 1. Ollama (si pas déjà up)
# 2. brain-engine/server.py → port 7700
# 3. Vérifie la connexion peer desktop
# 4. Affiche l'écart embeddings (sync si besoin)
#
# Le script reste en foreground — brain-engine tourne tant que le terminal est ouvert.
# Laisser tourner dans un terminal dédié, travailler dans un autre.
# Arrêt propre : Ctrl+C (trap SIGINT → kill brain-engine)
# Pour nous uniquement — pas dans le template.
set -euo pipefail

BRAIN_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
LOG_SERVER="$BRAIN_ROOT/brain-engine/server-local.log"
DESKTOP_PEER="192.168.1.11"

# Fix: pre-initialise PID_SERVER so the trap is safe under `set -u` when
# Ctrl+C arrives before brain-engine has been launched (previously the trap
# died with "PID_SERVER: unbound variable").
PID_SERVER=""

# cleanup — stop brain-engine (if it was started) and exit cleanly.
# Installed for SIGINT/SIGTERM below.
cleanup() {
    echo ""
    echo "→ Arrêt brain laptop..."
    if [[ -n "$PID_SERVER" ]]; then
        kill "$PID_SERVER" 2>/dev/null || true
    fi
    exit 0
}
trap cleanup SIGINT SIGTERM
echo ""
echo "=== 🧠 Brain laptop — startup ==="
echo " Root : $BRAIN_ROOT"
echo ""

# 1. Ollama — start it if not running, then probe its HTTP API.
echo "--- 1/4 Ollama ---"
if ! pgrep -x ollama > /dev/null 2>&1; then
    # NOTE(review): the trailing '&' backgrounds the whole
    # `systemctl || ollama serve` list, not only the fallback — confirm intended.
    sudo systemctl start ollama 2>/dev/null || ollama serve &>/dev/null &
    sleep 2
fi
if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
    echo "✅ Ollama up"
else
    echo "⚠️ Ollama non disponible — RAG local désactivé"
fi

# 2. brain-engine — restart fresh on port 7700, log to server-local.log.
echo ""
echo "--- 2/4 brain-engine ---"
# Kill any previous instance (pattern-match on the launch command line).
pkill -f "python3 brain-engine/server.py" 2>/dev/null || true
sleep 1
cd "$BRAIN_ROOT"
python3 brain-engine/server.py > "$LOG_SERVER" 2>&1 &
PID_SERVER=$!
sleep 3
# kill -0 = "is the process still alive?" (no signal is actually delivered)
if kill -0 "$PID_SERVER" 2>/dev/null; then
    echo "✅ brain-engine PID $PID_SERVER → http://localhost:7700"
else
    echo "❌ brain-engine n'a pas démarré — voir $LOG_SERVER"
    exit 1
fi

# 3. Peer desktop — informational only; the laptop keeps working standalone.
echo ""
echo "--- 3/4 Peer desktop ---"
if curl -s "http://${DESKTOP_PEER}:7700/health" > /dev/null 2>&1; then
    echo "✅ Desktop online (${DESKTOP_PEER}:7700)"
else
    echo "⚠️ Desktop offline — mode autonome"
fi

# 4. Embedding gap vs master (delegated to brain-sync-replica.sh).
echo ""
echo "--- 4/4 Embeddings ---"
bash "$BRAIN_ROOT/scripts/brain-sync-replica.sh" status 2>&1

echo ""
echo "=== Brain laptop prêt ==="
echo " brain-engine : http://localhost:7700"
echo " BSI network : http://localhost:7700/bsi/network"
echo ""
echo " Ctrl+C pour arrêter"

# Block until brain-engine exits (or Ctrl+C fires the cleanup trap).
wait "$PID_SERVER"

332
scripts/brain-state-bot.sh Executable file
View File

@@ -0,0 +1,332 @@
#!/bin/bash
# brain-state-bot.sh — tier free
# Lit les claims ouverts + git log → écrit/met à jour workspace/live-states.md
# Commit live-states.md avec "live-states: bot update"
#
# Usage : bash scripts/brain-state-bot.sh [--dry-run]
#
# Règles :
# - Ne ferme pas les claims BSI
# - Ne lit pas MYSECRETS
# - Silencieux sauf erreur critique (stderr)
# - Ne jamais écraser `needs` si déjà présent
# -e is not set — presumably so one failing claim does not abort the whole
# bot run (it is meant for cron); -u and pipefail still apply. TODO confirm.
set -uo pipefail

BRAIN_ROOT="${BRAIN_ROOT:-/home/tetardtek/Dev/Brain}"   # overridable via environment
LIVE_STATES="$BRAIN_ROOT/workspace/live-states.md"      # bot-maintained state file
DRY_RUN=0

# ─── Args ──────────────────────────────────────────────────────────────────
for arg in "$@"; do
    case "$arg" in
        --dry-run) DRY_RUN=1 ;;   # compute and print blocks, never write/commit
    esac
done
# ─── Helpers ───────────────────────────────────────────────────────────────
# Current local time formatted as "YYYY-MM-DDTHH:MM".
_now_iso() {
    printf '%(%Y-%m-%dT%H:%M)T\n' -1
}
# Convert an ISO8601 timestamp (YYYY-MM-DDTHH:MM) to epoch seconds.
# Prints 0 when the timestamp cannot be parsed.
_iso_to_epoch() {
    local stamp="${1/T/ }"   # `date -d` wants a space, not the ISO 'T'
    date -d "$stamp" +%s 2>/dev/null || echo 0
}
# Extract a simple YAML scalar ("key: value") from a file.
# First matching line only; double quotes are removed and surrounding
# whitespace is trimmed. Prints "" when the key is absent.
_yaml_field() {
    local src="$1" name="$2" line
    line=$(grep -E "^${name}:[[:space:]]" "$src" 2>/dev/null | head -1)
    line=${line#"$name":}                   # drop the "key:" prefix
    line=${line#"${line%%[![:space:]]*}"}   # left-trim
    line=${line//\"/}                       # remove all double quotes
    xargs <<<"$line"                        # final whitespace trim
}
# Derive the project slug from a claim's session id.
# sess-YYYYMMDD-HHMM-slug1-slug2 → slug1 (first segment after the timestamp).
# An id without the timestamp prefix passes through before segmentation.
_derive_project() {
    local sess_id="$1" rest
    if [[ "$sess_id" =~ ^sess-[0-9]{8}-[0-9]{4}-(.*)$ ]]; then
        rest="${BASH_REMATCH[1]}"
    else
        rest="$sess_id"   # no timestamp prefix: keep as-is
    fi
    printf '%s\n' "${rest%%-*}"   # first hyphen-separated segment
}
# Derive the project slug from a claim's scope field.
#   scope: "originsdigital-back/" → "originsdigital"
#   scope: "brain/"               → "brain"
_project_from_scope() {
    local scope="$1" token _rest
    read -r token _rest <<<"$scope"   # first whitespace-delimited token
    token="${token//\//}"             # drop every slash
    # Strip the trailing "-segment" when the token contains a hyphen
    # (originsdigital-back → originsdigital ; brain stays brain).
    if [[ "$token" == *-* ]]; then
        token="${token%-*}"
    fi
    printf '%s\n' "$token"
}
# Find a git repository for a project slug.
# Search order: "brain" maps straight to BRAIN_ROOT; then each candidate root
# is checked by name (case-insensitive substring), then its direct children.
# Prints the repo path, or "" when nothing matches.
_find_project_repo() {
    local slug="$1"
    # NOTE(review): Gitea/Github roots are hardcoded to /home/tetardtek even
    # though BRAIN_ROOT itself is overridable — confirm before reuse elsewhere.
    local candidates=(
        "$BRAIN_ROOT"
        "$BRAIN_ROOT/brain-ui"
        "$BRAIN_ROOT/brain-engine"
        "/home/tetardtek/Dev/Gitea"
        "/home/tetardtek/Dev/Github"
    )
    # Direct hit: brain → BRAIN_ROOT (no filesystem probing needed).
    if [ "$slug" = "brain" ]; then
        echo "$BRAIN_ROOT"
        return
    fi
    # Look for a directory containing the slug (case-insensitive).
    for base in "${candidates[@]}"; do
        [ -d "$base" ] || continue
        # Does the candidate itself match (e.g. brain-ui)? Must be a git repo.
        local basename
        basename=$(basename "$base" | tr '[:upper:]' '[:lower:]')
        local slug_lc
        slug_lc=$(echo "$slug" | tr '[:upper:]' '[:lower:]')
        if [[ "$basename" == *"$slug_lc"* ]] && [ -d "$base/.git" ]; then
            echo "$base"
            return
        fi
        # Otherwise scan one level down for a matching git repo.
        if [ -d "$base" ]; then
            local found
            found=$(find "$base" -maxdepth 1 -type d -iname "*${slug}*" 2>/dev/null | head -1)
            if [ -n "$found" ] && [ -d "$found/.git" ]; then
                echo "$found"
                return
            fi
        fi
    done
    echo ""
}
# Subject line of a repo's most recent commit, truncated to 80 chars.
# Prints "" when the path is not a git repository.
_git_last_commit() {
    local repo="$1"
    if [ ! -d "$repo/.git" ]; then
        echo ""
        return
    fi
    # %s = commit subject only (equivalent to --oneline with the hash stripped)
    git -C "$repo" log -1 --format='%s' 2>/dev/null | head -c 80
}
# Committer timestamp (epoch seconds) of a repo's HEAD commit.
# Prints "0" for a non-repo path or when git fails.
_git_last_commit_epoch() {
    local repo="$1"
    if [ ! -d "$repo/.git" ]; then
        echo "0"
        return
    fi
    git -C "$repo" log -1 --format="%ct" 2>/dev/null || echo "0"
}
# ─── Read the current state of live-states.md ───────────────────────────────
# Extract one YAML field from the live-states.md entry identified by sess_id.
# Prints "" when the field does not exist or the sess_id is not found.
# Performance fix: the previous version piped every line through grep/sed
# (2–3 process forks per line of the file); this pure-bash version uses
# [[ =~ ]] and parameter expansion with identical matching/stripping behavior.
_get_existing_field() {
    local sess_id="$1" field="$2"
    local in_block=0 value="" line stripped
    while IFS= read -r line; do
        # Block start: a "- sess_id: <id>" line (optional trailing whitespace).
        if [[ "$line" =~ ^"- sess_id:"[[:space:]]*${sess_id}[[:space:]]*$ ]]; then
            in_block=1
            continue
        fi
        if (( in_block )); then
            # Block end: the next "- sess_id:" entry.
            if [[ "$line" == "- sess_id:"* ]]; then
                break
            fi
            # Field line: indented "<field>: value" — last occurrence wins.
            if [[ "$line" =~ ^[[:space:]]+${field}:[[:space:]] ]]; then
                stripped="${line#"${line%%[![:space:]]*}"}"             # left-trim
                stripped="${stripped#"$field":}"                        # drop "field:"
                stripped="${stripped#"${stripped%%[![:space:]]*}"}"     # spaces after ':'
                value="${stripped//\"/}"                                # drop double quotes
            fi
        fi
    done < "$LIVE_STATES"
    echo "$value"
}
# ─── Write one block into live-states.md ────────────────────────────────────
# Update-or-insert the block identified by sess_id in live-states.md.
# Args: sess_id project doing status needs priority updated
# In dry-run mode the would-be block is printed instead of written.
_upsert_block() {
    local sess_id="$1"
    local project="$2"
    local doing="$3"
    local status="$4"
    local needs="$5"
    local priority="$6"
    local updated="$7"

    # YAML list entry; sub-fields indented two spaces so _get_existing_field's
    # "^[[:space:]]+field:" pattern can find them later.
    local new_block
    new_block="- sess_id: ${sess_id}
  project: ${project}
  doing: \"${doing}\"
  status: ${status}
  needs: ${needs}
  priority: ${priority}
  team: []
  blocking: []
  context: \"\"
  updated: ${updated}"

    if [ "$DRY_RUN" -eq 1 ]; then
        echo "[dry-run] bloc à écrire pour ${sess_id}:"
        echo "$new_block"
        return
    fi

    # Does a block for this sess_id already exist?
    if grep -qE "^- sess_id:[[:space:]]*${sess_id}[[:space:]]*$" "$LIVE_STATES" 2>/dev/null; then
        # Differential update: replace the existing block in place.
        # python3 is used to avoid awk/bash quoting conflicts on arbitrary content.
        local tmpfile
        tmpfile=$(mktemp)
        python3 - "$LIVE_STATES" "$sess_id" "$new_block" > "$tmpfile" << 'PYEOF'
import sys, re

infile = sys.argv[1]
sess_id = sys.argv[2]
new_block = sys.argv[3]

with open(infile) as f:
    lines = f.readlines()

out = []
in_block = False
for line in lines:
    if re.match(r'^- sess_id:\s*' + re.escape(sess_id) + r'\s*$', line):
        in_block = True
        out.append(new_block + "\n")
        continue
    if in_block:
        # End of block: next entry, frontmatter, or a top-level comment
        if re.match(r'^- sess_id:', line) or re.match(r'^---', line) or re.match(r'^#', line):
            in_block = False
            out.append(line)
        # else: drop the old block's lines
    else:
        out.append(line)
sys.stdout.write("".join(out))
PYEOF
        mv "$tmpfile" "$LIVE_STATES"
    else
        # Insert: append at the end with a blank separator line.
        echo "" >> "$LIVE_STATES"
        echo "$new_block" >> "$LIVE_STATES"
    fi
}
# ─── Main ────────────────────────────────────────────────────────────────────
[ -f "$LIVE_STATES" ] || { echo "CRITICAL: $LIVE_STATES introuvable" >&2; exit 1; }

NOW_EPOCH=$(date +%s)
TWO_HOURS=7200
UPDATED=0   # number of session blocks upserted during this run

# One pass per claim file; only open claims are processed.
for claim in "$BRAIN_ROOT/claims"/sess-*.yml; do
    [ -f "$claim" ] || continue   # unmatched glob leaves the literal pattern

    # Claim fields (simple "key: value" YAML only).
    status=$(_yaml_field "$claim" "status")
    [ "$status" = "open" ] || continue
    sess_id=$(_yaml_field "$claim" "sess_id")
    [ -n "$sess_id" ] || continue
    scope=$(_yaml_field "$claim" "scope")
    # NOTE(review): opened_at is read but never used below — confirm intent.
    opened_at=$(_yaml_field "$claim" "opened_at")

    # Project slug: from scope first, from the sess_id pattern as fallback.
    project=""
    if [ -n "$scope" ]; then
        project=$(_project_from_scope "$scope")
    fi
    if [ -z "$project" ]; then
        project=$(_derive_project "$sess_id")
    fi
    [ -n "$project" ] || project="unknown"

    # Locate the project's git repo (may come back empty).
    repo=$(_find_project_repo "$project")

    # "doing" = subject of the last commit; generic label as fallback.
    doing=""
    if [ -n "$repo" ]; then
        doing=$(_git_last_commit "$repo")
    fi
    [ -n "$doing" ] || doing="En cours"

    # Current state of the existing block, if any.
    existing_needs=$(_get_existing_field "$sess_id" "needs")
    existing_status=$(_get_existing_field "$sess_id" "status")
    existing_updated=$(_get_existing_field "$sess_id" "updated")

    # needs: never overwrite a value that is already present.
    needs="${existing_needs:-none}"
    # Empty string also becomes none.
    [ -n "$needs" ] || needs="none"

    # Stale detection: updated > 2h ago + status progressing + no recent commit.
    new_status="progressing"
    if [ -n "$existing_status" ] && [ "$existing_status" != "closed" ]; then
        new_status="$existing_status"
    fi
    if [ "$new_status" = "progressing" ]; then
        stale=0
        if [ -n "$existing_updated" ]; then
            updated_epoch=$(_iso_to_epoch "$existing_updated")
            age=$(( NOW_EPOCH - updated_epoch ))
            if [ "$age" -gt "$TWO_HOURS" ]; then
                # No recent commit either?
                last_commit_epoch=0
                if [ -n "$repo" ]; then
                    last_commit_epoch=$(_git_last_commit_epoch "$repo")
                fi
                commit_age=$(( NOW_EPOCH - last_commit_epoch ))
                if [ "$commit_age" -gt "$TWO_HOURS" ]; then
                    stale=1
                fi
            fi
        fi
        if [ "$stale" -eq 1 ]; then
            new_status="idle"
            echo "stale: ${sess_id} → idle" >&2
        fi
    fi

    # Priority: fixed at medium (free tier — no cross-claim blocking analysis).
    priority="medium"

    # Updated: now.
    updated_ts=$(_now_iso)
    _upsert_block "$sess_id" "$project" "$doing" "$new_status" "$needs" "$priority" "$updated_ts"
    UPDATED=$(( UPDATED + 1 ))
done

# Commit only when sessions were updated and not in dry-run mode.
# Failures are silenced on purpose — the bot stays quiet under cron.
if [ "$DRY_RUN" -eq 0 ] && [ "$UPDATED" -gt 0 ]; then
    git -C "$BRAIN_ROOT" add workspace/live-states.md 2>/dev/null
    git -C "$BRAIN_ROOT" diff --cached --quiet 2>/dev/null || \
        git -C "$BRAIN_ROOT" commit -m "live-states: bot update" 2>/dev/null
fi
exit 0

188
scripts/brain-sync-replica.sh Executable file
View File

@@ -0,0 +1,188 @@
#!/usr/bin/env bash
# brain-sync-replica.sh — Réplication master → replica (embeddings)
# Le desktop est source de vérité. Le laptop reçoit une copie read-only.
#
# Usage :
# brain-sync-replica.sh status → écart master/replica
# brain-sync-replica.sh sync <replica_host> → sync vers replica
# brain-sync-replica.sh sync laptop → alias pour le peer "laptop"
#
# Prérequis : SSH sans mot de passe vers la replica
# Ne sync QUE la table embeddings — pas claims, pas locks (BSI local à chaque machine)
set -euo pipefail

BRAIN_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
DB_PATH="$BRAIN_ROOT/brain.db"
# NOTE(review): REMOTE_DB_PATH appears unused below (remote paths are
# hardcoded in cmd_sync) — confirm before removing.
REMOTE_DB_PATH="Dev/Brain/brain.db"

# Resolve a peer name to its bare host/IP via brain-compose.local.yml.
# Prints nothing when the peer (or its url) is missing.
resolve_peer() {
    local name="$1"
    python3 - "$BRAIN_ROOT/brain-compose.local.yml" "$name" << 'PY'
import sys, yaml

with open(sys.argv[1]) as f:
    data = yaml.safe_load(f) or {}
peers = data.get('peers', {})
peer = peers.get(sys.argv[2], {})
url = peer.get('url', '')
# Extract host from e.g. http://192.168.1.10:7700
if '://' in url:
    host = url.split('://')[1].split(':')[0]
    print(host)
PY
}
# --- STATUS ---
# Print local (master) embedding stats, then poll each active peer's /health.
cmd_status() {
    local local_count local_updated
    local_count=$(python3 "$BRAIN_ROOT/scripts/bsi-db.py" "SELECT COUNT(*) FROM embeddings WHERE indexed=1")
    local_updated=$(python3 "$BRAIN_ROOT/scripts/bsi-db.py" "SELECT MAX(updated_at) FROM embeddings")
    echo "=== Embedding master (local) ==="
    echo " Chunks indexés : $local_count"
    echo " Dernier update : $local_updated"

    # Peers come from brain-compose.local.yml (optional file).
    local compose="$BRAIN_ROOT/brain-compose.local.yml"
    if [ -f "$compose" ]; then
        echo ""
        echo "=== Peers ==="
        python3 - "$compose" << 'PY'
import yaml, json, urllib.request

with open(__import__('sys').argv[1]) as f:
    data = yaml.safe_load(f) or {}
for name, peer in data.get('peers', {}).items():
    if not peer.get('active', False):
        continue
    url = peer.get('url', '').rstrip('/')
    try:
        # 3 s timeout so an offline peer does not hang the status report
        with urllib.request.urlopen(f"{url}/health", timeout=3) as r:
            health = json.loads(r.read())
        indexed = health.get('indexed', '?')
        print(f" {name}: {indexed} chunks (online)")
    except Exception:
        print(f" {name}: offline")
PY
    fi
}
# --- SYNC ---
# Overwrite the replica's embeddings table with the master's copy.
# Master wins: the remote table is dropped and rebuilt, including when the
# replica holds MORE chunks than the master (negative delta).
cmd_sync() {
    local target="$1"
    local host

    # Accept either a peer name (resolved via brain-compose.local.yml) or a raw host.
    host=$(resolve_peer "$target" 2>/dev/null || echo "")
    if [ -z "$host" ]; then
        host="$target"
    fi
    local user="tetardtek"
    local remote="${user}@${host}"

    echo "=== Sync embeddings → $remote ==="

    # 1. Connectivity check (3 s timeout; requires passwordless SSH).
    if ! ssh -o ConnectTimeout=3 "$remote" "echo ok" > /dev/null 2>&1; then
        echo "❌ SSH unreachable : $remote"
        exit 1
    fi

    # 2. Local stats.
    local local_count
    local_count=$(python3 "$BRAIN_ROOT/scripts/bsi-db.py" "SELECT COUNT(*) FROM embeddings WHERE indexed=1")
    echo " Master : $local_count chunks"

    # 3. Replica stats (0 when the remote DB/script is missing).
    local remote_count
    remote_count=$(ssh "$remote" "python3 ~/Dev/Brain/scripts/bsi-db.py 'SELECT COUNT(*) FROM embeddings WHERE indexed=1' 2>/dev/null || echo 0")
    echo " Replica : $remote_count chunks"

    local delta=$((local_count - remote_count))
    if [ "$delta" -eq 0 ]; then
        echo "✅ Déjà synchronisé — 0 écart"
        exit 0
    fi
    echo " Écart : $delta chunks"
    echo ""

    # 4. Export the embeddings table into a throwaway DB file.
    local tmp="/tmp/brain-embeddings-sync.db"
    echo " Exporting embeddings table..."
    python3 - "$DB_PATH" "$tmp" << 'PY'
import sqlite3, sys

src = sqlite3.connect(sys.argv[1])
dst = sqlite3.connect(sys.argv[2])
dst.execute("DROP TABLE IF EXISTS embeddings")
# Copy schema
schema = src.execute("SELECT sql FROM sqlite_master WHERE type='table' AND name='embeddings'").fetchone()[0]
dst.execute(schema)
# Copy data
rows = src.execute("SELECT * FROM embeddings").fetchall()
cols = [d[0] for d in src.execute("PRAGMA table_info(embeddings)").fetchall()]
placeholders = ','.join(['?'] * len(cols))
dst.executemany(f"INSERT INTO embeddings VALUES ({placeholders})", rows)
dst.commit()
dst.close()
src.close()
print(f" ✅ {len(rows)} chunks exportés")
PY

    # 5. Ship the export to the replica.
    echo " Transferring to $remote..."
    scp -q "$tmp" "${remote}:/tmp/brain-embeddings-sync.db"

    # 6. Import on the replica (paths hardcoded to the replica's layout).
    # NOTE(review): drops/rebuilds the table while a remote brain-engine may
    # hold brain.db open — confirm SQLite locking is acceptable here.
    ssh "$remote" python3 - << 'PY'
import sqlite3

src = sqlite3.connect("/tmp/brain-embeddings-sync.db")
dst = sqlite3.connect("/home/tetardtek/Dev/Brain/brain.db")
# Drop and recreate
dst.execute("DROP TABLE IF EXISTS embeddings")
schema = src.execute("SELECT sql FROM sqlite_master WHERE type='table' AND name='embeddings'").fetchone()[0]
dst.execute(schema)
rows = src.execute("SELECT * FROM embeddings").fetchall()
cols = [d[0] for d in src.execute("PRAGMA table_info(embeddings)").fetchall()]
placeholders = ','.join(['?'] * len(cols))
dst.executemany(f"INSERT INTO embeddings VALUES ({placeholders})", rows)
dst.commit()
dst.close()
src.close()
print(f" ✅ {len(rows)} chunks importés sur replica")
PY

    # 7. Cleanup both temp copies.
    rm -f "$tmp"
    ssh "$remote" "rm -f /tmp/brain-embeddings-sync.db"

    # 8. Verify the counts converged.
    local new_count
    new_count=$(ssh "$remote" "python3 ~/Dev/Brain/scripts/bsi-db.py 'SELECT COUNT(*) FROM embeddings WHERE indexed=1' 2>/dev/null || echo '?'")
    echo ""
    echo "=== Sync terminé ==="
    echo " Master : $local_count chunks"
    echo " Replica : $new_count chunks"
    if [ "$local_count" = "$new_count" ]; then
        echo " ✅ Synchronisé — 0 écart"
    else
        echo " ⚠️ Écart résiduel : $((local_count - new_count))"
    fi
}
# --- Router ---
# Subcommand dispatch; an unknown or missing subcommand prints usage and exits 1.
CMD="${1:-}"
case "$CMD" in
    status) cmd_status ;;
    sync) cmd_sync "${2:-}" ;;
    *)
        echo "Usage : brain-sync-replica.sh <status|sync>"
        echo ""
        echo " status → écart master/replica"
        echo " sync <host|peer_name> → sync embeddings vers replica"
        echo ""
        echo " Exemple : brain-sync-replica.sh sync laptop"
        exit 1
        ;;
esac

View File

@@ -0,0 +1,92 @@
#!/usr/bin/env bash
# brain-template-export.sh — Extrait brain-template.db depuis brain.db (kernel+public only)
# Usage: bash scripts/brain-template-export.sh [output_path]
#
# Fast path : copie les vecteurs existants, pas besoin d'Ollama.
# Zéro table session (claims, signals, handoffs, sessions, agent_loads, locks, circuit_breaker, agent_memory).
set -euo pipefail

BRAIN_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
SRC="${BRAIN_ROOT}/brain.db"
DST="${1:-${BRAIN_ROOT}/brain-template.db}"

if [[ ! -f "$SRC" ]]; then
  echo "❌ brain.db introuvable : $SRC" >&2
  exit 1
fi

echo "brain-template-export : $SRC → $DST"
echo "Scopes inclus : kernel, public"

python3 - "$SRC" "$DST" << 'PY'
import sqlite3
import sys

src_path = sys.argv[1]
dst_path = sys.argv[2]

# Connexion source en lecture seule : aucune écriture possible sur brain.db.
src = sqlite3.connect(f'file:{src_path}?mode=ro', uri=True)
src.row_factory = sqlite3.Row

# Créer le template DB
dst = sqlite3.connect(dst_path)
dst.execute("PRAGMA journal_mode=WAL")

# Créer la table embeddings (seule table du template)
dst.execute("""
CREATE TABLE IF NOT EXISTS embeddings (
    chunk_id TEXT PRIMARY KEY,
    filepath TEXT NOT NULL,
    title TEXT,
    chunk_text TEXT NOT NULL,
    vector BLOB,
    model TEXT,
    indexed INTEGER DEFAULT 0,
    scope TEXT NOT NULL DEFAULT 'work',
    created_at TEXT NOT NULL DEFAULT (datetime('now')),
    updated_at TEXT NOT NULL DEFAULT (datetime('now'))
)
""")
dst.execute("CREATE INDEX IF NOT EXISTS idx_emb_filepath ON embeddings(filepath)")
dst.execute("CREATE INDEX IF NOT EXISTS idx_emb_indexed ON embeddings(indexed)")
dst.execute("CREATE INDEX IF NOT EXISTS idx_emb_scope ON embeddings(scope)")
dst.commit()

# Copier uniquement les embeddings kernel + public, indexés et vectorisés.
COLUMNS = ('chunk_id', 'filepath', 'title', 'chunk_text', 'vector',
           'model', 'indexed', 'scope', 'created_at', 'updated_at')
ALLOWED_SCOPES = ('kernel', 'public')
scope_ph = ','.join('?' * len(ALLOWED_SCOPES))
rows = src.execute(f"""
    SELECT {', '.join(COLUMNS)}
    FROM embeddings
    WHERE indexed = 1 AND vector IS NOT NULL AND scope IN ({scope_ph})
""", ALLOWED_SCOPES).fetchall()

# executemany : un seul appel au lieu de N execute() — même résultat, plus rapide.
col_ph = ','.join('?' * len(COLUMNS))
dst.executemany(
    f"INSERT OR REPLACE INTO embeddings ({', '.join(COLUMNS)}) VALUES ({col_ph})",
    [tuple(r) for r in rows],
)
dst.commit()
dst.execute("VACUUM")

# Stats par scope (comptage en un seul passage)
total = len(rows)
scopes = {}
for r in rows:
    s = r['scope']
    scopes[s] = scopes.get(s, 0) + 1

src.close()
dst.close()

print(f"✅ Template généré : {dst_path}")
print(f" Chunks : {total}")
for s, c in sorted(scopes.items()):
    print(f" - {s} : {c}")
print(" Tables session : 0 (aucune)")
PY

45
scripts/brain-template-push.sh Executable file
View File

@@ -0,0 +1,45 @@
#!/usr/bin/env bash
# brain-template-push.sh — Export brain-template.db + push vers VPS + restart
# Usage: bash scripts/brain-template-push.sh
#
# Workflow : export local → scp → restart brain-engine sur VPS
# Prérequis : VPS_IP et VPS_SSH_USER dans MYSECRETS
set -euo pipefail

BRAIN_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
TEMPLATE_DB="${BRAIN_ROOT}/brain-template.db"
SECRETS="${HOME}/Dev/BrainSecrets/MYSECRETS"

# Lire VPS config depuis MYSECRETS (silencieux — pas de valeur affichée)
if [[ ! -f "$SECRETS" ]]; then
  echo "❌ MYSECRETS introuvable" >&2
  exit 1
fi

# `|| true` obligatoire : sous `set -e -o pipefail`, un grep sans correspondance
# ferait sortir le script en silence AVANT le message d'erreur explicite
# ci-dessous (l'échec du pipeline dans l'affectation déclenche errexit).
VPS_IP=$(grep '^VPS_IP=' "$SECRETS" | cut -d= -f2- || true)
VPS_USER=$(grep '^VPS_SSH_USER=' "$SECRETS" | cut -d= -f2- || true)
if [[ -z "$VPS_IP" || -z "$VPS_USER" ]]; then
  echo "❌ VPS_IP ou VPS_SSH_USER manquant dans MYSECRETS" >&2
  exit 1
fi

# Step 1 : Export
echo "1/3 Export brain-template.db..."
bash "${BRAIN_ROOT}/scripts/brain-template-export.sh" "$TEMPLATE_DB"

# Step 2 : SCP
echo ""
echo "2/3 Push vers VPS..."
scp -q "$TEMPLATE_DB" "${VPS_USER}@${VPS_IP}:~/Dev/Brain/brain-template.db"
echo "✅ brain-template.db transféré"

# Step 3 : Restart (le service relit brain-template.db au démarrage)
echo ""
echo "3/3 Restart brain-engine..."
ssh "${VPS_USER}@${VPS_IP}" "sudo systemctl restart brain-engine"
echo "✅ brain-engine redémarré"

echo ""
echo "🏁 Template déployé sur VPS — brain.tetardtek.com sert le template."

View File

@@ -4,17 +4,16 @@
# Détecte les changements dans BRAIN-INDEX.md → notifie via Telegram
#
# Setup VPS (une seule fois) :
# 1. Copier ce script sur le VPS : scp brain-watch-vps.sh root@<VPS_IP>:/home/<user>/brain-watch/
# 1. Copier ce script sur le VPS : scp brain-watch-vps.sh root@VPS:/home/tetardtek/brain-watch/
# 2. Copier brain-notify.sh aussi
# 3. Cloner le brain : git clone git@<GITEA_URL>:<USERNAME>/brain.git /home/<user>/brain-watch/brain
# 4. Copier MYSECRETS sur le VPS : scp MYSECRETS root@<VPS_IP>:/home/<user>/brain-watch/
# 3. Cloner le brain : git clone git@git.tetardtek.com:Tetardtek/brain.git /home/tetardtek/brain-watch/brain
# 4. Copier MYSECRETS sur le VPS : scp MYSECRETS root@VPS:/home/tetardtek/brain-watch/
# 5. Installer le service systemd : install-brain-watch-vps.sh
# 6. systemctl start brain-watch && systemctl enable brain-watch
set -euo pipefail
# Configurable — override via env ou MYSECRETS (VPS_WATCH_ROOT=...)
WATCH_ROOT="${VPS_WATCH_ROOT:-$HOME/brain-watch}"
WATCH_ROOT="/home/tetardtek/brain-watch"
BRAIN_INDEX="$WATCH_ROOT/brain/BRAIN-INDEX.md"
NOTIFY="$WATCH_ROOT/brain-notify.sh"
BRAIN_ROOT="$WATCH_ROOT" # pour brain-notify.sh — lit MYSECRETS ici
@@ -24,8 +23,7 @@ LOG_PREFIX="[brain-watch-vps]"
export BRAIN_ROOT
if [[ ! -d "$WATCH_ROOT/brain" ]]; then
BRAIN_GIT_URL="${BRAIN_GIT_URL:-$(grep '^BRAIN_GIT_URL=' "$WATCH_ROOT/MYSECRETS" 2>/dev/null | cut -d= -f2-)}"
echo "$LOG_PREFIX ERREUR : brain non cloné. Lancer : git clone $BRAIN_GIT_URL $WATCH_ROOT/brain" >&2
echo "$LOG_PREFIX ERREUR : brain non cloné. Lancer : git clone git@git.tetardtek.com:Tetardtek/brain.git $WATCH_ROOT/brain" >&2
exit 1
fi

50
scripts/bsi-db.py Normal file
View File

@@ -0,0 +1,50 @@
#!/usr/bin/env python3
"""
bsi-db.py — Wrapper SQLite léger pour les scripts BSI bash.
Remplace sqlite3 CLI (pas toujours installé).
Usage :
    python3 scripts/bsi-db.py "SELECT * FROM claims" → query, pipe-separated
    python3 scripts/bsi-db.py -exec "INSERT INTO ..." → write (no output)
    python3 scripts/bsi-db.py -script "CREATE TABLE ...; ..." → multi-statement
"""
import sys
import sqlite3
from pathlib import Path

# brain.db vit à la racine du repo, un niveau au-dessus de scripts/.
DB_PATH = str(Path(__file__).parent.parent / 'brain.db')


def _render(row):
    """Formate une ligne de résultat : champs séparés par '|', NULL → ''."""
    return '|'.join('' if value is None else str(value) for value in row)


def main():
    argv = sys.argv
    if len(argv) < 2:
        print("Usage: bsi-db.py [-exec|-script] <sql>", file=sys.stderr)
        sys.exit(1)

    # Trois modes : -exec (écriture simple), -script (multi-statements),
    # sinon query (lecture, résultat pipe-separated sur stdout).
    flag = argv[1]
    if flag in ('-exec', '-script'):
        mode = flag.lstrip('-')
        sql = argv[2] if len(argv) > 2 else ''
    else:
        mode = 'query'
        sql = flag

    conn = sqlite3.connect(DB_PATH)
    conn.execute("PRAGMA journal_mode=WAL")
    try:
        if mode == 'script':
            conn.executescript(sql)
        elif mode == 'exec':
            conn.execute(sql)
            conn.commit()
        else:
            for row in conn.execute(sql).fetchall():
                print(_render(row))
    finally:
        conn.close()


if __name__ == '__main__':
    main()

105
scripts/bsi-peer-poll.sh Executable file
View File

@@ -0,0 +1,105 @@
#!/usr/bin/env bash
# bsi-peer-poll.sh — Poll les peers et écrit l'état dans workspace/live-states.md
# Cron : */5 * * * * bash ~/Dev/Brain/scripts/bsi-peer-poll.sh
#
# Écrit un snapshot lisible par time-anchor (session-navigate L1).
# Si rien n'a changé depuis le dernier poll → pas de réécriture (idempotent).
# Si un peer est injoignable → marqué offline, pas d'erreur.
set -euo pipefail

BRAIN_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
COMPOSE_LOCAL="$BRAIN_ROOT/brain-compose.local.yml"
LIVE_STATES="$BRAIN_ROOT/workspace/live-states.md"
TIMESTAMP=$(date '+%Y-%m-%d %H:%M')

mkdir -p "$BRAIN_ROOT/workspace"

# Collecter l'état local + peers (nécessite pyyaml côté python3)
OUTPUT=$(python3 - "$BRAIN_ROOT" "$COMPOSE_LOCAL" "$TIMESTAMP" <<'PYEOF'
import yaml, subprocess, sys

brain_root = sys.argv[1]
compose_path = sys.argv[2]
timestamp = sys.argv[3]

# Machine locale
with open(compose_path) as f:
    compose = yaml.safe_load(f)
machine = compose.get("machine", "unknown")

lines = []
lines.append(f"# live-states.md — snapshot {timestamp}")
lines.append("# Généré par bsi-peer-poll.sh — ne pas éditer manuellement")
lines.append("")

# Claims locaux (format bsi-query : champs séparés par " | ")
result = subprocess.run(
    ["bash", f"{brain_root}/scripts/bsi-query.sh", "open"],
    capture_output=True, text=True, timeout=5
)
local_claims = result.stdout.strip()
lines.append(f"## {machine} (local)")
if local_claims:
    for line in local_claims.split("\n"):
        parts = line.split(" | ")
        if len(parts) >= 4:
            lines.append(f"- `{parts[0].strip()}` — {parts[1].strip()} — {parts[3].strip()}")
else:
    lines.append("- (idle)")
lines.append("")

# Peers : un ssh par peer actif ; tout échec (timeout, réseau…) → offline.
peers = compose.get("peers", {})
for name, info in peers.items():
    if not info.get("active", False):
        continue
    url = info.get("url", "")
    host = url.replace("http://", "").replace("https://", "").split(":")[0]
    try:
        result = subprocess.run(
            ["ssh", "-o", "BatchMode=yes", "-o", "ConnectTimeout=3",
             f"tetardtek@{host}",
             f"cd ~/Dev/Brain && bash scripts/bsi-query.sh open 2>/dev/null"],
            capture_output=True, text=True, timeout=10
        )
        peer_claims = result.stdout.strip()
        lines.append(f"## {name} ({host})")
        if peer_claims:
            for line in peer_claims.split("\n"):
                parts = line.split(" | ")
                if len(parts) >= 4:
                    lines.append(f"- `{parts[0].strip()}` — {parts[1].strip()} — {parts[3].strip()}")
        else:
            lines.append("- (idle)")
    except Exception:
        # TimeoutExpired inclus — Exception couvre toutes les erreurs d'appel.
        lines.append(f"## {name} ({host})")
        lines.append("- (offline)")
    lines.append("")

# Résumé (compte local uniquement — les peers sont listés au-dessus)
total_active = 0
if local_claims:
    total_active += len(local_claims.strip().split("\n"))
lines.append("---")
lines.append(f"Dernière mise à jour : {timestamp}")
lines.append(f"Sessions actives : {total_active} local + peers")
print("\n".join(lines))
PYEOF
)

# Écrire uniquement si changement (éviter les écritures inutiles)
if [ -f "$LIVE_STATES" ]; then
  # Comparer sans les timestamps (lignes 1-2 + ligne "Dernière mise à jour").
  # `|| true` : si grep -v filtre tout, son code 1 ferait sinon sortir le
  # script (set -e + pipefail) avant l'écriture.
  OLD=$(tail -n +3 "$LIVE_STATES" | grep -v "Dernière mise à jour" || true)
  NEW=$(echo "$OUTPUT" | tail -n +3 | grep -v "Dernière mise à jour" || true)
  if [ "$OLD" = "$NEW" ]; then
    exit 0
  fi
fi
echo "$OUTPUT" > "$LIVE_STATES"

View File

@@ -9,6 +9,7 @@
# bsi-query.sh count-stale → nombre de claims stale (entier, stdout)
# bsi-query.sh signals → signaux pending (CHECKPOINT | HANDOFF | BLOCKED_ON)
# bsi-query.sh health → dernière session : health_score + type
# bsi-query.sh peers → claims open sur toutes les instances (SSH)
#
# Retour :
# Exit 0 = succès (même si 0 résultats)
@@ -26,7 +27,7 @@ CMD="${1:-help}"
# Fallback propre si brain.db absent
if [[ ! -f "$DB_PATH" ]]; then
echo "⚠️ brain.db absent ($DB_PATH) — lancer: brain-db-sync.sh (optionnel)" >&2
echo "⚠️ brain.db absent ($DB_PATH) — lancer: python3 brain-engine/migrate.py" >&2
exit 1
fi
@@ -106,4 +107,42 @@ conn.close()
PYEOF
}
# ── Commande peers : interroge les instances distantes via SSH ─────────
if [[ "$CMD" == "peers" ]]; then
  COMPOSE_LOCAL="$BRAIN_ROOT/brain-compose.local.yml"

  # Nom de la machine locale — fallback "unknown" si compose absent/illisible
  MACHINE=$(python3 -c "
import yaml
with open('$COMPOSE_LOCAL') as f:
    print(yaml.safe_load(f).get('machine', 'unknown'))
" 2>/dev/null || echo "unknown")

  echo "🖥 $MACHINE (local)"
  run_query "open"

  # Interroger chaque peer actif déclaré dans le compose local.
  python3 -c "
import yaml
with open('$COMPOSE_LOCAL') as f:
    c = yaml.safe_load(f)
peers = c.get('peers', {})
for name, info in peers.items():
    if not info.get('active', False):
        continue
    url = info.get('url', '')
    host = url.replace('http://','').replace('https://','').split(':')[0]
    print(f'PEER:{name}:{host}')
" 2>/dev/null | while IFS=: read -r _ name host; do
    echo ""
    echo "💻 $name ($host)"
    # ssh -n : sans lui, ssh consomme le stdin de la boucle while
    # (les lignes PEER:... restantes) et seul le premier peer est interrogé.
    result=$(ssh -n -o BatchMode=yes -o ConnectTimeout=3 "tetardtek@$host" \
      "cd ~/Dev/Brain && bash scripts/bsi-query.sh open" 2>/dev/null)
    if [[ -n "$result" ]]; then
      echo "$result"
    else
      echo " (aucun claim ouvert ou machine injoignable)"
    fi
  done
  exit 0
fi

run_query "$CMD"

61
scripts/dev-start.sh Executable file
View File

@@ -0,0 +1,61 @@
#!/usr/bin/env bash
# dev-start.sh — Démarre l'environnement dev brain local complet
# Usage : bash scripts/dev-start.sh
#
# Lance :
# 1. brain-engine/server.py → port 7700 (BRAIN_TIER=owner)
# 2. brain-ui (Vite) → port 5173
#
# Arrêt propre : Ctrl+C (trap SIGINT → kill les deux processus)
set -euo pipefail

BRAIN_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
LOG_SERVER="$BRAIN_ROOT/brain-engine/server-dev.log"
LOG_VITE="/tmp/vite-brain.log"

# Charger les secrets si disponibles (silencieux)
SECRETS_FILE="$HOME/Dev/BrainSecrets/MYSECRETS"
if [[ -f "$SECRETS_FILE" ]]; then
  set -a && source "$SECRETS_FILE" && set +a
fi

# Override tier owner en dev — pas de token requis
export BRAIN_TIER=owner

# Initialisés AVANT le trap : sous `set -u`, un Ctrl+C reçu avant le
# démarrage des process ferait sinon planter cleanup() sur variable non définie.
PID_SERVER=""
PID_VITE=""

cleanup() {
  echo ""
  echo "→ Arrêt dev-start..."
  [[ -n "$PID_SERVER" ]] && kill "$PID_SERVER" 2>/dev/null || true
  [[ -n "$PID_VITE" ]] && kill "$PID_VITE" 2>/dev/null || true
  exit 0
}
trap cleanup SIGINT SIGTERM

# Tuer les instances précédentes si elles tournent
lsof -ti:7700 | xargs kill 2>/dev/null || true
lsof -ti:5173 | xargs kill 2>/dev/null || true
sleep 1

echo "🧠 brain-engine → http://localhost:7700 (log: $LOG_SERVER)"
python3 "$BRAIN_ROOT/brain-engine/server.py" > "$LOG_SERVER" 2>&1 &
PID_SERVER=$!

echo "🎨 brain-ui → http://localhost:5173/ui/"
# Le `&` porte sur toute la liste `cd && npm` : le cd s'exécute dans le
# sous-shell en arrière-plan, le répertoire courant du script est préservé.
cd "$BRAIN_ROOT/brain-ui" && npm run dev > "$LOG_VITE" 2>&1 &
PID_VITE=$!

echo ""
echo "Ctrl+C pour tout arrêter"
echo "---"

# Attendre que les deux process soient up
sleep 3
if kill -0 "$PID_SERVER" 2>/dev/null && kill -0 "$PID_VITE" 2>/dev/null; then
  echo "✅ brain-engine PID $PID_SERVER"
  echo "✅ brain-ui PID $PID_VITE"
else
  echo "❌ Un process n'a pas démarré — vérifier les logs"
fi

wait

335
scripts/diagram-init.sh Executable file
View File

@@ -0,0 +1,335 @@
#!/usr/bin/env bash
# diagram-init.sh — Génère le fichier .excalidraw initial depuis un workflow.yml
# Usage : bash scripts/diagram-init.sh <workflow-name>
# Exemple : bash scripts/diagram-init.sh superoauth-tier3
# Output : draw/diagrams/<name>.excalidraw
#
# NB : pas de `set -e` — le code retour du bloc python est capturé
# explicitement en fin de script pour conditionner l'aide au commit.
BRAIN_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
WORKFLOW_NAME="${1:-}"

if [[ -z "$WORKFLOW_NAME" ]]; then
  echo "Usage : bash scripts/diagram-init.sh <workflow-name>"
  echo "Exemple : bash scripts/diagram-init.sh superoauth-tier3"
  exit 1
fi

WORKFLOW_FILE="$BRAIN_ROOT/workflows/${WORKFLOW_NAME}.yml"
OUTPUT_DIR="$BRAIN_ROOT/draw/diagrams"
OUTPUT_FILE="$OUTPUT_DIR/${WORKFLOW_NAME}.excalidraw"

if [[ ! -f "$WORKFLOW_FILE" ]]; then
  echo "❌ Workflow introuvable : $WORKFLOW_FILE"
  exit 1
fi

mkdir -p "$OUTPUT_DIR"

python3 - "$WORKFLOW_FILE" "$OUTPUT_FILE" << 'PYEOF'
import sys
import json
import yaml
import uuid
import time

workflow_path = sys.argv[1]
output_path = sys.argv[2]

with open(workflow_path) as f:
    wf = yaml.safe_load(f)

name = wf.get("name", "workflow")
chain = wf.get("chain", [])

# Layout constants
NODE_W = 220
NODE_H = 90
NODE_GAP = 60
START_X = 40
START_Y = 120

# Colors
COLOR_PENDING = "#868e96"  # gris — pending
COLOR_BORDER = "#343a40"
COLOR_BG_PAGE = "#f8f9fa"

elements = []

def make_id():
    """Identifiant court (8 hex) pour les éléments sans id sémantique."""
    return str(uuid.uuid4())[:8]

# Title
elements.append({
    "id": make_id(),
    "type": "text",
    "x": START_X,
    "y": 40,
    "width": len(name) * 12 + 40,
    "height": 36,
    "text": name,
    "fontSize": 24,
    "fontFamily": 1,
    "textAlign": "left",
    "verticalAlign": "top",
    "strokeColor": COLOR_BORDER,
    "backgroundColor": "transparent",
    "fillStyle": "solid",
    "strokeWidth": 1,
    "roughness": 0,
    "opacity": 100,
    "angle": 0,
    "seed": 1,
    "version": 1,
    "isDeleted": False,
    "groupIds": [],
    "boundElements": [],
    "updated": int(time.time()),
    "link": None,
    "locked": False,
})

node_ids = {}
for i, step in enumerate(chain):
    n = step.get("step", i + 1)
    stype = step.get("type", "")
    angle = step.get("story_angle", "")
    agents = step.get("agents", [])
    gate = step.get("gate", None)
    x = START_X + i * (NODE_W + NODE_GAP)
    y = START_Y
    # id sémantique stable : permet à diagram-patch.sh de retrouver le nœud
    node_id = f"{name}-step-{n}"
    node_ids[n] = {"id": node_id, "x": x, "y": y}
    # Gate badge (above node)
    if gate:
        gate_label = "⚡ gate:human" if gate == "human" else f"⚡ gate:{gate}"
        elements.append({
            "id": make_id(),
            "type": "text",
            "x": x,
            "y": y - 28,
            "width": NODE_W,
            "height": 20,
            "text": gate_label,
            "fontSize": 13,
            "fontFamily": 1,
            "textAlign": "center",
            "verticalAlign": "top",
            "strokeColor": "#f39c12",
            "backgroundColor": "transparent",
            "fillStyle": "solid",
            "strokeWidth": 1,
            "roughness": 0,
            "opacity": 100,
            "angle": 0,
            "seed": i + 100,
            "version": 1,
            "isDeleted": False,
            "groupIds": [],
            "boundElements": [],
            "updated": int(time.time()),
            "link": None,
            "locked": False,
        })
    # Truncate story_angle
    label_angle = (angle[:38] + "…") if len(angle) > 40 else angle
    agents_str = " · ".join(agents[:3]) if agents else ""
    label_text = f"step {n} [{stype}]\n{label_angle}\n⬜ pending"
    elements.append({
        "id": node_id,
        "type": "rectangle",
        "x": x,
        "y": y,
        "width": NODE_W,
        "height": NODE_H,
        "backgroundColor": COLOR_PENDING,
        "strokeColor": COLOR_BORDER,
        "fillStyle": "solid",
        "strokeWidth": 2,
        "roughness": 0,
        "opacity": 80,
        "angle": 0,
        "seed": i + 10,
        "version": 1,
        "isDeleted": False,
        "groupIds": [],
        "boundElements": [],
        "updated": int(time.time()),
        "link": None,
        "locked": False,
    })
    # Label inside node
    elements.append({
        "id": make_id(),
        "type": "text",
        "x": x + 10,
        "y": y + 8,
        "width": NODE_W - 20,
        "height": NODE_H - 16,
        "text": label_text,
        "fontSize": 12,
        "fontFamily": 1,
        "textAlign": "left",
        "verticalAlign": "top",
        "strokeColor": "#ffffff",
        "backgroundColor": "transparent",
        "fillStyle": "solid",
        "strokeWidth": 1,
        "roughness": 0,
        "opacity": 100,
        "angle": 0,
        "seed": i + 200,
        "version": 1,
        "isDeleted": False,
        "groupIds": [],
        "boundElements": [],
        "updated": int(time.time()),
        "link": None,
        "locked": False,
    })
    # Agents badge (below node)
    if agents_str:
        elements.append({
            "id": make_id(),
            "type": "text",
            "x": x,
            "y": y + NODE_H + 6,
            "width": NODE_W,
            "height": 18,
            "text": agents_str,
            "fontSize": 11,
            "fontFamily": 1,
            "textAlign": "center",
            "verticalAlign": "top",
            "strokeColor": "#868e96",
            "backgroundColor": "transparent",
            "fillStyle": "solid",
            "strokeWidth": 1,
            "roughness": 0,
            "opacity": 100,
            "angle": 0,
            "seed": i + 300,
            "version": 1,
            "isDeleted": False,
            "groupIds": [],
            "boundElements": [],
            "updated": int(time.time()),
            "link": None,
            "locked": False,
        })

# Arrows between nodes
for i in range(len(chain) - 1):
    n_from = chain[i].get("step", i + 1)
    n_to = chain[i + 1].get("step", i + 2)
    if n_from not in node_ids or n_to not in node_ids:
        continue
    from_x = node_ids[n_from]["x"] + NODE_W
    to_x = node_ids[n_to]["x"]
    arr_y = START_Y + NODE_H // 2
    # Detect type drift (code→deploy or deploy→code)
    type_from = chain[i].get("type", "")
    type_to = chain[i + 1].get("type", "")
    is_drift = (type_from != type_to)
    arrow_color = "#e74c3c" if is_drift else "#495057"
    arr_id = make_id()
    elements.append({
        "id": arr_id,
        "type": "arrow",
        "x": from_x,
        "y": arr_y,
        "width": to_x - from_x,
        "height": 0,
        "points": [[0, 0], [to_x - from_x, 0]],
        "strokeColor": arrow_color,
        "backgroundColor": "transparent",
        "fillStyle": "solid",
        "strokeWidth": 3 if is_drift else 2,
        "roughness": 0,
        "opacity": 100,
        "angle": 0,
        "seed": i + 400,
        "version": 1,
        "isDeleted": False,
        "groupIds": [],
        "boundElements": [],
        "updated": int(time.time()),
        "link": None,
        "locked": False,
        "startBinding": None,
        "endBinding": None,
        "lastCommittedPoint": None,
        "startArrowhead": None,
        "endArrowhead": "arrow",
    })
    # Drift label
    if is_drift:
        mid_x = from_x + (to_x - from_x) // 2 - 40
        elements.append({
            "id": make_id(),
            "type": "text",
            "x": mid_x,
            "y": arr_y - 22,
            "width": 100,
            "height": 18,
            "text": f"⚠️ {type_from}→{type_to}",
            "fontSize": 11,
            "fontFamily": 1,
            "textAlign": "center",
            "verticalAlign": "top",
            "strokeColor": "#e74c3c",
            "backgroundColor": "transparent",
            "fillStyle": "solid",
            "strokeWidth": 1,
            "roughness": 0,
            "opacity": 100,
            "angle": 0,
            "seed": i + 500,
            "version": 1,
            "isDeleted": False,
            "groupIds": [],
            "boundElements": [],
            "updated": int(time.time()),
            "link": None,
            "locked": False,
        })

excalidraw = {
    "type": "excalidraw",
    "version": 2,
    "source": "brain/diagram-init.sh",
    "elements": elements,
    "appState": {
        "gridSize": None,
        "viewBackgroundColor": COLOR_BG_PAGE,
    },
    "files": {}
}

with open(output_path, "w") as f:
    json.dump(excalidraw, f, indent=2, ensure_ascii=False)

print(f"✅ {output_path}")
print(f" {len(chain)} steps — {len(elements)} éléments générés")
PYEOF
STATUS=$?

if [[ $STATUS -eq 0 ]]; then
  echo ""
  echo "→ Ouvrir dans draw.tetardtek.com ou commiter :"
  echo " git -C $BRAIN_ROOT/draw add diagrams/${WORKFLOW_NAME}.excalidraw"
  echo " git -C $BRAIN_ROOT/draw commit -m \"diagram: init ${WORKFLOW_NAME}\""
fi
exit $STATUS

119
scripts/diagram-patch.sh Executable file
View File

@@ -0,0 +1,119 @@
#!/usr/bin/env bash
# diagram-patch.sh — Patche un nœud dans un .excalidraw après signal BSI
# Usage : bash scripts/diagram-patch.sh <workflow-name> <step> <status>
# Status : done | gate | blocked | locked | circuit-break | abort
# Exemple : bash scripts/diagram-patch.sh superoauth-tier3 1 done
#
# NB : pas de `set -e` — le code retour python est capturé explicitement.
BRAIN_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
WORKFLOW_NAME="${1:-}"
STEP="${2:-}"
STATUS="${3:-}"

if [[ -z "$WORKFLOW_NAME" || -z "$STEP" || -z "$STATUS" ]]; then
  echo "Usage : bash scripts/diagram-patch.sh <workflow-name> <step> <status>"
  echo ""
  echo "Status disponibles :"
  echo " done → ✅ vert — step terminé"
  echo " gate → ⚡ orange — gate:human en attente"
  echo " blocked → ❌ rouge — BLOCKED_ON"
  echo " locked → ⬜ gris — pas encore atteint"
  echo " circuit-break → 🔴 rouge vif + bordure épaisse"
  echo " abort → grisé — workflow aborted"
  exit 1
fi

EXCALIDRAW="$BRAIN_ROOT/draw/diagrams/${WORKFLOW_NAME}.excalidraw"
if [[ ! -f "$EXCALIDRAW" ]]; then
  echo "❌ Fichier introuvable : $EXCALIDRAW"
  echo " → bash scripts/diagram-init.sh $WORKFLOW_NAME"
  exit 1
fi

python3 - "$EXCALIDRAW" "$WORKFLOW_NAME" "$STEP" "$STATUS" << 'PYEOF'
import sys
import json
import time

excalidraw_path = sys.argv[1]
workflow_name = sys.argv[2]
step = sys.argv[3]
status = sys.argv[4]

# Color + label mapping
STATUS_MAP = {
    "done": {"color": "#2ecc71", "label": "✅ done", "stroke": "#1a9e57", "width": 2},
    "gate": {"color": "#f39c12", "label": "⚡ gate:human", "stroke": "#c87f0a", "width": 2},
    "blocked": {"color": "#e74c3c", "label": "❌ blocked", "stroke": "#c0392b", "width": 2},
    "locked": {"color": "#868e96", "label": "⬜ pending", "stroke": "#343a40", "width": 2},
    "circuit-break": {"color": "#c0392b", "label": "🔴 circuit break", "stroke": "#922b21", "width": 4},
    "abort": {"color": "#adb5bd", "label": "aborted", "stroke": "#6c757d", "width": 1},
}

if status not in STATUS_MAP:
    print(f"❌ Status inconnu : {status}")
    print(f" Valeurs valides : {', '.join(STATUS_MAP.keys())}")
    sys.exit(1)

cfg = STATUS_MAP[status]
# id sémantique posé par diagram-init.sh sur le rectangle du step
node_id = f"{workflow_name}-step-{step}"

with open(excalidraw_path) as f:
    data = json.load(f)

elements = data.get("elements", [])

# Patcher le rectangle du nœud ; mémoriser sa position au passage pour
# retrouver ensuite le label texte (évite un second parcours de la liste).
patched = False
target_x = None
target_y = None
for el in elements:
    if el.get("id") == node_id and el.get("type") == "rectangle":
        el["backgroundColor"] = cfg["color"]
        el["strokeColor"] = cfg["stroke"]
        el["strokeWidth"] = cfg["width"]
        el["updated"] = int(time.time())
        target_x = el["x"]
        target_y = el["y"]
        patched = True
        break

if not patched:
    print(f"⚠️ Nœud introuvable : {node_id}")
    print(f" → Vérifier que diagram-init.sh a bien été lancé pour ce workflow")
    sys.exit(1)

# Le label du nœud est le texte placé à (x+10, y+8) par diagram-init.sh ;
# on remplace sa dernière ligne (la ligne "status") par le nouveau label.
for el in elements:
    if (el.get("type") == "text"
            and abs(el.get("x", 0) - target_x - 10) < 5
            and abs(el.get("y", 0) - target_y - 8) < 5):
        lines = el.get("text", "").split("\n")
        if len(lines) >= 3:
            lines[-1] = cfg["label"]
        else:
            # split() renvoie toujours au moins un élément → append sûr
            lines.append(cfg["label"])
        el["text"] = "\n".join(lines)
        el["updated"] = int(time.time())
        break

data["elements"] = elements
with open(excalidraw_path, "w") as f:
    json.dump(data, f, indent=2, ensure_ascii=False)

print(f"✅ {workflow_name} step {step} → {cfg['label']}")
PYEOF
PATCH_STATUS=$?

if [[ $PATCH_STATUS -eq 0 ]]; then
  echo "→ Commiter le patch :"
  echo " git -C $BRAIN_ROOT/draw add diagrams/${WORKFLOW_NAME}.excalidraw"
  echo " git -C $BRAIN_ROOT/draw commit -m \"diagram: ${WORKFLOW_NAME} step ${STEP} → ${STATUS}\""
fi
exit $PATCH_STATUS

106
scripts/feature-gate-status.sh Executable file
View File

@@ -0,0 +1,106 @@
#!/bin/bash
# feature-gate-status.sh — État du feature-gate (tier actif + features enabled/disabled)
# Lecture seule. Aucune écriture.
#
# Usage :
# bash scripts/feature-gate-status.sh
set -uo pipefail

BRAIN_ROOT="$(git -C "$(dirname "$0")" rev-parse --show-toplevel)"
COMPOSE_FILE="$BRAIN_ROOT/brain-compose.local.yml"

# --- Lire le tier actif ---
# Renvoie le tier de la première instance active du compose local.
# Fallbacks : fichier absent → "free" ; pas de python3+yaml → grep naïf.
_get_tier() {
  [ -f "$COMPOSE_FILE" ] || { echo "free"; return; }
  local tier="free"
  if command -v python3 &>/dev/null && python3 -c "import yaml" &>/dev/null; then
    tier=$(BRAIN_COMPOSE="$COMPOSE_FILE" python3 - <<'PYEOF' 2>/dev/null
import yaml, os, sys
path = os.environ.get('BRAIN_COMPOSE', '')
try:
    with open(path) as f:
        data = yaml.safe_load(f)
    instances = data.get('instances', {})
    for name, inst in instances.items():
        if inst.get('active'):
            print(inst.get('feature_set', {}).get('tier', 'free'))
            sys.exit(0)
except Exception:
    pass
print('free')
PYEOF
)
  else
    # Fallback sans yaml : première ligne "tier:" du fichier.
    # [[:space:]] plutôt que \s : portable (BSD grep ne connaît pas \s).
    tier=$(grep "^[[:space:]]*tier:" "$COMPOSE_FILE" | head -1 | awk '{print $NF}' | tr -d "'\"")
  fi
  echo "${tier:-free}"
}
# _tier_level <tier> — projette un nom de tier sur son rang numérique.
# free → 0, pro → 1, full → 2 ; tout tier inconnu retombe sur 0.
_tier_level() {
  local rank
  case "$1" in
    pro)  rank=1 ;;
    full) rank=2 ;;
    *)    rank=0 ;;  # free et valeurs inconnues
  esac
  echo "$rank"
}
# --- Mapping complet feature → tier minimum ---
declare -A FEATURE_MIN=(
  [kernel.boot]="free"
  [kernel.agents]="free"
  [workflow.manual]="free"
  [diagram.readonly]="free"
  [bact.enrichment]="pro"
  [workflow.orchestrated]="pro"
  [diagram.interactive]="pro"
  [supervisor.project]="pro"
  [bact.rag]="full"
  [diagram.actions]="full"
  [distillation]="full"
)

# Ordre d'affichage
FEATURE_ORDER=(
  kernel.boot kernel.agents workflow.manual diagram.readonly
  bact.enrichment workflow.orchestrated diagram.interactive supervisor.project
  bact.rag diagram.actions distillation
)

# --- Main ---
TIER=$(_get_tier)
LEVEL=$(_tier_level "$TIER")

echo "feature-gate — tier: $TIER"
echo "──────────────────────────────────────────────"

# Tri des features en deux listes selon le rang du tier actif.
enabled=()
disabled=()
for feature in "${FEATURE_ORDER[@]}"; do
  required_tier="${FEATURE_MIN[$feature]}"
  if [ "$LEVEL" -ge "$(_tier_level "$required_tier")" ]; then
    enabled+=("$feature")
  else
    disabled+=("$feature (requires: $required_tier)")
  fi
done

if [ "${#enabled[@]}" -gt 0 ]; then
  echo " ✅ Enabled"
  printf ' + %s\n' "${enabled[@]}"
fi
if [ "${#disabled[@]}" -gt 0 ]; then
  echo " ❌ Disabled"
  printf ' - %s\n' "${disabled[@]}"
fi

echo "──────────────────────────────────────────────"
echo " ${#enabled[@]} enabled / ${#disabled[@]} disabled"

View File

@@ -1,7 +1,7 @@
#!/bin/bash
# file-lock.sh — Mutex fichier BSI-v3-7
# file-lock.sh — Mutex fichier BSI-v3-7 (ADR-036 : brain.db)
# Empêche deux satellites d'écrire simultanément dans le même fichier.
# Complète le scope-lock BSI (niveau dossier) avec une granularité fichier.
# Source : table locks dans brain.db (ex : locks/*.lock)
#
# Usage :
# file-lock.sh acquire <filepath> <sess-id> [ttl_minutes] → acquiert le lock
@@ -18,15 +18,20 @@
set -euo pipefail
BRAIN_ROOT="$(git -C "$(dirname "$0")" rev-parse --show-toplevel)"
LOCKS_DIR="$BRAIN_ROOT/locks"
DB_PATH="$BRAIN_ROOT/brain.db"
DEFAULT_TTL=60 # minutes
mkdir -p "$LOCKS_DIR"
# Convertit un chemin fichier en nom de lock (remplace / et . par -)
filepath_to_lockname() {
echo "$1" | sed 's|/|-|g' | sed 's|\.|-|g' | sed 's|^-||'
}
# Init table si absente
python3 "$BRAIN_ROOT/scripts/bsi-db.py" -script "
CREATE TABLE IF NOT EXISTS locks (
id INTEGER PRIMARY KEY AUTOINCREMENT,
filepath TEXT NOT NULL UNIQUE,
holder TEXT NOT NULL,
claimed_at TEXT NOT NULL DEFAULT (datetime('now')),
expires_at TEXT NOT NULL,
ttl_min INTEGER NOT NULL DEFAULT 60
);
"
# --- ACQUIRE ---
cmd_acquire() {
@@ -34,44 +39,37 @@ cmd_acquire() {
local sess_id="$2"
local ttl="${3:-$DEFAULT_TTL}"
local lockname
lockname=$(filepath_to_lockname "$filepath")
local lockfile="$LOCKS_DIR/${lockname}.lock"
local now
now=$(date +%s)
local expires_at
expires_at=$(date -d "+${ttl} minutes" +%Y-%m-%dT%H:%M 2>/dev/null \
|| date -v+${ttl}M +%Y-%m-%dT%H:%M) # macOS compat
# Check existing active lock held by someone else
local existing
existing=$(python3 "$BRAIN_ROOT/scripts/bsi-db.py" "
SELECT holder, expires_at FROM locks
WHERE filepath = '$filepath'
AND julianday('now') < julianday(expires_at)
AND holder != '$sess_id'
LIMIT 1;
")
# Vérifier si lock existant et non expiré
if [ -f "$lockfile" ]; then
existing_holder=$(grep '^holder:' "$lockfile" | sed 's/holder: //')
existing_expires=$(grep '^expires_at:' "$lockfile" | sed 's/expires_at: //')
existing_epoch=$(date -d "$existing_expires" +%s 2>/dev/null \
|| date -j -f "%Y-%m-%dT%H:%M" "$existing_expires" +%s 2>/dev/null || echo 0)
if [ "$now" -lt "$existing_epoch" ]; then
echo "🔴 LOCK — $filepath"
echo " Détenu par : $existing_holder"
echo " Expire à : $existing_expires"
echo ""
echo " Attendre le release ou contacter : $existing_holder"
exit 1
else
# Lock expiré — on peut le prendre
echo "⚠️ Lock expiré de $existing_holder — acquisition automatique"
rm -f "$lockfile"
fi
if [ -n "$existing" ]; then
local holder expires
holder=$(echo "$existing" | cut -d'|' -f1)
expires=$(echo "$existing" | cut -d'|' -f2)
echo "🔴 LOCK — $filepath"
echo " Détenu par : $holder"
echo " Expire à : $expires"
echo ""
echo " Attendre le release ou contacter : $holder"
exit 1
fi
# Écrire le lock
cat > "$lockfile" << EOF
file: $filepath
holder: $sess_id
claimed_at: $(date +%Y-%m-%dT%H:%M)
expires_at: $expires_at
ttl_min: $ttl
EOF
# Upsert — remplace si même holder ou expiré
python3 "$BRAIN_ROOT/scripts/bsi-db.py" -script "
DELETE FROM locks WHERE filepath = '$filepath';
INSERT INTO locks (filepath, holder, claimed_at, expires_at, ttl_min)
VALUES ('$filepath', '$sess_id', datetime('now'), datetime('now', '+$ttl minutes'), $ttl);
"
local expires_at
expires_at=$(python3 "$BRAIN_ROOT/scripts/bsi-db.py" "SELECT expires_at FROM locks WHERE filepath = '$filepath';")
echo "✅ Lock acquis : $filepath"
echo " Session : $sess_id"
@@ -83,22 +81,20 @@ cmd_release() {
local filepath="$1"
local sess_id="$2"
local lockname
lockname=$(filepath_to_lockname "$filepath")
local lockfile="$LOCKS_DIR/${lockname}.lock"
local holder
holder=$(python3 "$BRAIN_ROOT/scripts/bsi-db.py" "SELECT holder FROM locks WHERE filepath = '$filepath';")
if [ ! -f "$lockfile" ]; then
if [ -z "$holder" ]; then
echo " Pas de lock actif sur : $filepath"
exit 0
fi
existing_holder=$(grep '^holder:' "$lockfile" | sed 's/holder: //')
if [ "$existing_holder" != "$sess_id" ]; then
echo "🚨 Release refusé — lock détenu par : $existing_holder (pas $sess_id)"
if [ "$holder" != "$sess_id" ]; then
echo "🚨 Release refusé — lock détenu par : $holder (pas $sess_id)"
exit 2
fi
rm -f "$lockfile"
python3 "$BRAIN_ROOT/scripts/bsi-db.py" -exec "DELETE FROM locks WHERE filepath = '$filepath' AND holder = '$sess_id'"
echo "✅ Lock libéré : $filepath"
}
@@ -106,88 +102,67 @@ cmd_release() {
cmd_check() {
local filepath="$1"
local lockname
lockname=$(filepath_to_lockname "$filepath")
local lockfile="$LOCKS_DIR/${lockname}.lock"
local row
row=$(python3 "$BRAIN_ROOT/scripts/bsi-db.py" "
SELECT holder, expires_at,
CASE WHEN julianday('now') < julianday(expires_at) THEN 'active' ELSE 'expired' END
FROM locks WHERE filepath = '$filepath';
")
if [ ! -f "$lockfile" ]; then
if [ -z "$row" ]; then
echo "✅ Libre : $filepath"
exit 0
fi
local now
now=$(date +%s)
existing_holder=$(grep '^holder:' "$lockfile" | sed 's/holder: //')
existing_expires=$(grep '^expires_at:' "$lockfile" | sed 's/expires_at: //')
existing_epoch=$(date -d "$existing_expires" +%s 2>/dev/null \
|| date -j -f "%Y-%m-%dT%H:%M" "$existing_expires" +%s 2>/dev/null || echo 0)
local holder expires status
holder=$(echo "$row" | cut -d'|' -f1)
expires=$(echo "$row" | cut -d'|' -f2)
status=$(echo "$row" | cut -d'|' -f3)
if [ "$now" -lt "$existing_epoch" ]; then
if [ "$status" = "active" ]; then
echo "🔴 Locké : $filepath"
echo " Holder : $existing_holder"
echo " Expire : $existing_expires"
echo " Holder : $holder"
echo " Expire : $expires"
else
echo "⚠️ Lock expiré (nettoyable) : $filepath"
echo " Ancien holder : $existing_holder"
echo " Ancien holder : $holder"
fi
}
# --- LIST ---
cmd_list() {
local locks
locks=$(find "$LOCKS_DIR" -name "*.lock" | sort)
local rows
rows=$(python3 "$BRAIN_ROOT/scripts/bsi-db.py" "
SELECT filepath, holder, expires_at,
CASE WHEN julianday('now') < julianday(expires_at) THEN 'actif' ELSE 'expiré' END
FROM locks ORDER BY claimed_at DESC;
")
if [ -z "$locks" ]; then
if [ -z "$rows" ]; then
echo "✅ Aucun lock actif"
exit 0
fi
local now
now=$(date +%s)
echo "Locks actifs :"
echo ""
while IFS= read -r lockfile; do
local file holder expires_at epoch status
file=$(grep '^file:' "$lockfile" | sed 's/file: *//')
holder=$(grep '^holder:' "$lockfile" | sed 's/holder: *//')
expires_at=$(grep '^expires_at:' "$lockfile" | sed 's/expires_at: *//')
epoch=$(date -d "$expires_at" +%s 2>/dev/null \
|| date -j -f "%Y-%m-%dT%H:%M" "$expires_at" +%s 2>/dev/null || echo 0)
if [ "$now" -lt "$epoch" ]; then
status="🔴 actif"
else
status="⚠️ expiré"
fi
echo " $status | $file | $holder | exp: $expires_at"
done <<< "$locks"
while IFS='|' read -r filepath holder expires status; do
local icon="🔴"
[ "$status" = "expiré" ] && icon="⚠️ "
echo " $icon $status | $filepath | $holder | exp: $expires"
done <<< "$rows"
}
# --- CLEANUP ---
cmd_cleanup() {
local now
now=$(date +%s)
local count=0
for lockfile in "$LOCKS_DIR"/*.lock; do
[ -f "$lockfile" ] || continue
expires_at=$(grep '^expires_at:' "$lockfile" | sed 's/expires_at: *//')
epoch=$(date -d "$expires_at" +%s 2>/dev/null \
|| date -j -f "%Y-%m-%dT%H:%M" "$expires_at" +%s 2>/dev/null || echo 0)
if [ "$now" -ge "$epoch" ]; then
file=$(grep '^file:' "$lockfile" | sed 's/file: *//')
rm -f "$lockfile"
echo "🗑️ Lock expiré supprimé : $file"
count=$((count + 1))
fi
done
local count
count=$(python3 "$BRAIN_ROOT/scripts/bsi-db.py" "
SELECT COUNT(*) FROM locks WHERE julianday('now') >= julianday(expires_at);
")
if [ "$count" -eq 0 ]; then
echo "✅ Aucun lock expiré à nettoyer"
else
python3 "$BRAIN_ROOT/scripts/bsi-db.py" -exec "DELETE FROM locks WHERE julianday('now') >= julianday(expires_at)"
echo "$count lock(s) nettoyé(s)"
fi
}

View File

@@ -25,7 +25,7 @@ set -euo pipefail
# Configuration — à adapter si besoin
# ---------------------------------------------------------------------------
WATCH_ROOT="${VPS_WATCH_ROOT:-$HOME/brain-watch}"
WATCH_ROOT="/home/tetardtek/brain-watch"
MYSECRETS="$WATCH_ROOT/MYSECRETS"
BOT_PORT=5001
BOT_SCRIPT="$WATCH_ROOT/brain-bot.py"
@@ -62,7 +62,7 @@ fi
# ---------------------------------------------------------------------------
echo ""
echo "Domaine pour le webhook (ex: bot.<OWNER_DOMAIN>) :"
echo "Domaine pour le webhook (ex: bot.tetardtek.com) :"
echo -n "→ "
read -r BOT_DOMAIN
@@ -94,7 +94,7 @@ After=network.target
[Service]
Type=simple
User=${VPS_SERVICE_USER:-$(whoami)}
User=tetardtek
WorkingDirectory=${WATCH_ROOT}
Environment=BRAIN_WATCH_ROOT=${WATCH_ROOT}
Environment=BRAIN_BOT_PORT=${BOT_PORT}

View File

@@ -6,7 +6,7 @@
# scripts/install-brain-hooks.sh --check → vérifie si les hooks sont installés
#
# Hooks installés :
# post-commit → déclenche brain-db-sync.sh si claims/ handoffs/ ou BRAIN-INDEX.md changent
# post-commit → déclenche brain-db-sync.sh si handoffs/ agents/ ou BRAIN-INDEX.md changent
#
# Idempotent — peut être relancé sans risque.
# À relancer sur chaque clone frais (hooks non versionnés dans git).
@@ -46,7 +46,7 @@ if [[ -f "$POST_COMMIT" ]] && ! grep -q "brain-db-sync" "$POST_COMMIT"; then
cat >> "$POST_COMMIT" <<'HOOK'
# Déclenche brain-db-sync.sh si claims, handoffs ou BRAIN-INDEX ont changé
_brain_changed=$(git diff HEAD~1 --name-only 2>/dev/null \
| grep -qE '^(claims/|handoffs/|BRAIN-INDEX\.md)' && echo yes || echo no)
| grep -qE '^(handoffs/|agents/|BRAIN-INDEX\.md)' && echo yes || echo no)
if [[ "$_brain_changed" == "yes" ]]; then
BRAIN_ROOT="$(git rev-parse --show-toplevel)"
bash "$BRAIN_ROOT/scripts/brain-db-sync.sh" --quiet || true
@@ -61,7 +61,7 @@ else
# Sync brain.db si claims, handoffs ou BRAIN-INDEX ont changé
_brain_changed=$(git diff HEAD~1 --name-only 2>/dev/null \
| grep -qE '^(claims/|handoffs/|BRAIN-INDEX\.md)' && echo yes || echo no)
| grep -qE '^(handoffs/|agents/|BRAIN-INDEX\.md)' && echo yes || echo no)
if [[ "$_brain_changed" == "yes" ]]; then
BRAIN_ROOT="$(git rev-parse --show-toplevel)"
bash "$BRAIN_ROOT/scripts/brain-db-sync.sh" --quiet || true
@@ -73,6 +73,6 @@ fi
echo ""
echo "Hooks brain actifs :"
echo " post-commit → brain-db-sync.sh (déclenché sur claims/ handoffs/ BRAIN-INDEX.md)"
echo " post-commit → brain-db-sync.sh (déclenché sur handoffs/ agents/ BRAIN-INDEX.md)"
echo ""
echo "Pour vérifier : scripts/install-brain-hooks.sh --check"

View File

@@ -13,13 +13,8 @@ TARGET="${1:-both}"
BRAIN_ROOT="${BRAIN_ROOT:-$HOME/Dev/Brain}"
VPS_USER="root"
VPS_IP=$(grep '^VPS_IP=' "$BRAIN_ROOT/MYSECRETS" | cut -d= -f2-)
# Configurable — lues depuis MYSECRETS si non définies en env
VPS_WATCH_ROOT="${VPS_WATCH_ROOT:-$(grep '^VPS_WATCH_ROOT=' "$BRAIN_ROOT/MYSECRETS" 2>/dev/null | cut -d= -f2- || echo "/home/$VPS_USER/brain-watch")}"
GITEA_BRAIN_URL="${BRAIN_GIT_URL:-$(grep '^BRAIN_GIT_URL=' "$BRAIN_ROOT/MYSECRETS" 2>/dev/null | cut -d= -f2-)}"
if [[ -z "$GITEA_BRAIN_URL" ]]; then
echo "❌ BRAIN_GIT_URL manquant — ajouter dans MYSECRETS : BRAIN_GIT_URL=git@<host>:<user>/brain.git"
exit 1
fi
VPS_WATCH_ROOT="/home/tetardtek/brain-watch"
GITEA_BRAIN_URL="git@git.tetardtek.com:Tetardtek/brain.git"
install_local() {
echo "=== Installation SUPERVISOR local (systemd user) ==="
@@ -106,7 +101,7 @@ After=network.target
[Service]
Type=simple
User=root
ExecStart=/home/<user>/brain-watch/brain-watch-vps.sh
ExecStart=/home/tetardtek/brain-watch/brain-watch-vps.sh
Restart=always
RestartSec=10
StandardOutput=journal

View File

@@ -27,7 +27,7 @@ ERROR_PATTERNS=(
)
# Patterns de chemin absolu — exclusions pour les placeholders templates
ABSOLUTE_PATH_PATTERN="/home/[a-z]" # ex: /home/alice — chemin réel, pas /home/<user>
ABSOLUTE_PATH_PATTERN="/home/[a-z]" # /home/tetardtek — chemin réel, pas /home/<user>
ABSOLUTE_PATH_EXCLUDE="<" # Exclure les lignes avec placeholder (<user>, <PATHS...)
# --- Patterns WARN : références documentaires — OK si contexte architecture ---
@@ -59,7 +59,7 @@ for pattern in "${ERROR_PATTERNS[@]}"; do
fi
done
# --- Scan ERROR — chemins absolus réels (ex: /home/<user>/, pas /home/<user>/) ---
# --- Scan ERROR — chemins absolus réels (ex: /home/tetardtek/, pas /home/<user>/) ---
while IFS= read -r -d '' file; do
# Cherche /home/[a-z] et exclut les lignes avec placeholder <
matches=$(grep -n "$ABSOLUTE_PATH_PATTERN" "$file" 2>/dev/null \

View File

@@ -0,0 +1,177 @@
#!/usr/bin/env python3
"""
migrate-claims-to-db.py — Migration one-shot : claims/*.yml → brain.db
ADR-036 : BSI hors git — les claims deviennent la source de vérité dans brain.db.
Usage :
python3 scripts/migrate-claims-to-db.py → migrer tout
python3 scripts/migrate-claims-to-db.py --dry-run → preview sans écriture
python3 scripts/migrate-claims-to-db.py --archive → migrer + archiver les .yml
Idempotent : INSERT OR IGNORE sur sess_id PRIMARY KEY.
"""
import os
import re
import sys
import sqlite3
import shutil
from pathlib import Path
from datetime import datetime, timedelta
BRAIN_ROOT = Path(__file__).parent.parent
CLAIMS_DIR = BRAIN_ROOT / 'claims'
DB_PATH = BRAIN_ROOT / 'brain.db'
ARCHIVE_DIR = BRAIN_ROOT / 'archive' / 'claims-git-era'
# Kernel scopes — synchronisé avec KERNEL.md
KERNEL_SCOPES = ['agents/', 'profil/', 'scripts/', 'KERNEL.md',
'brain-constitution.md', 'brain-compose.yml']
PERSONAL_SCOPES = ['profil/capital', 'profil/objectifs', 'progression/', 'MYSECRETS']
def extract(content, *patterns, default=''):
    """Return group 1 of the first pattern that matches `content`.

    Patterns are tried in order with re.MULTILINE; the captured value is
    stripped of surrounding whitespace and quote characters. Falls back to
    `default` when no pattern matches.
    """
    first_hit = next(
        (m for m in (re.search(pat, content, re.MULTILINE) for pat in patterns) if m),
        None,
    )
    if first_hit is None:
        return default
    return first_hit.group(1).strip().strip('"\'')
def infer_zone(scope):
    """Map a claim scope string to its zone: 'kernel', 'personal' or 'project'.

    Kernel markers take precedence over personal ones; anything that matches
    neither list is a project scope — ADR-014.
    """
    if any(marker in scope for marker in KERNEL_SCOPES):
        return 'kernel'
    if any(marker in scope for marker in PERSONAL_SCOPES):
        return 'personal'
    return 'project'
def parse_claim(filepath):
    """Parse a claim YAML file into a flat dict ready for the claims table.

    Returns None when no sess_id can be extracted (caller skips the file).
    Extraction is regex-based, not a real YAML parser: only simple
    `key: value` lines are recognized.
    """
    with open(filepath, 'r') as f:
        content = f.read()
    sess_id = extract(content, r'^sess_id:\s*(.+)', r'^name:\s*(sess-.+)')
    if not sess_id:
        return None
    scope = extract(content, r'^scope:\s*(.+)')
    status = extract(content, r'^status:\s*(.+)', default='closed')
    opened_at = extract(content, r'^opened_at:\s*(.+)', r'^opened:\s*(.+)')
    type_ = extract(content, r'^type:\s*(.+)', default='work')
    handoff = extract(content, r'^handoff_level:\s*(.+)')
    story = extract(content, r'^story_angle:\s*(.+)')
    parent = extract(content, r'^parent_satellite:\s*(.+)')
    sat_type = extract(content, r'^satellite_type:\s*(.+)')
    sat_level = extract(content, r'^satellite_level:\s*(.+)')
    theme_branch = extract(content, r'^theme_branch:\s*(.+)')
    zone = extract(content, r'^zone:\s*(.+)') or infer_zone(scope)
    mode = extract(content, r'^mode:\s*(.+)')
    # TTL check : un claim 'open' depuis plus de 4h est marqué 'stale'.
    # FIX : l'ancien code soustrayait un datetime naïf
    # (opened_dt.replace(tzinfo=None)) d'un datetime aware dès que
    # opened_at portait un suffixe 'Z'/offset → TypeError avalé par le
    # except → ces claims n'étaient jamais marqués stale. On compare
    # désormais deux datetimes de même nature via datetime.now(tzinfo).
    if status == 'open' and opened_at:
        try:
            opened_dt = datetime.fromisoformat(opened_at.replace('Z', '+00:00'))
            now = datetime.now(opened_dt.tzinfo)  # aware ssi opened_dt est aware
            if now - opened_dt > timedelta(hours=4):
                status = 'stale'
        except (ValueError, TypeError):
            pass  # timestamp illisible → status inchangé
    return {
        'sess_id': sess_id,
        'type': type_,
        'scope': scope,
        'status': status,
        'opened_at': opened_at,
        'handoff_level': handoff or None,
        'story_angle': story or None,
        'parent_sess': parent or None,
        'satellite_type': sat_type or None,
        'satellite_level': sat_level or None,
        'theme_branch': theme_branch or None,
        'zone': zone,
        'mode': mode or None,
        'ttl_hours': 4,
    }
def main():
    """CLI entry point — migrate every claims/sess-*.yml into brain.db.

    Flags read from sys.argv:
      --dry-run → parse + print only; no DB writes, no archiving
      --archive → after a real migration, move the .yml files to ARCHIVE_DIR

    Idempotent: relies on INSERT OR IGNORE against the sess_id primary key.
    """
    dry_run = '--dry-run' in sys.argv
    archive = '--archive' in sys.argv
    if not CLAIMS_DIR.exists():
        print(f"❌ claims/ introuvable : {CLAIMS_DIR}")
        sys.exit(1)
    yml_files = sorted(CLAIMS_DIR.glob('sess-*.yml'))
    print(f"📦 {len(yml_files)} fichiers claims trouvés")
    if dry_run:
        print(" (mode dry-run — aucune écriture)")
    # WAL : permet des lecteurs concurrents pendant la migration
    conn = sqlite3.connect(str(DB_PATH))
    conn.execute("PRAGMA journal_mode=WAL")
    migrated = 0
    skipped = 0
    stale_marked = 0
    errors = 0
    for yml in yml_files:
        claim = parse_claim(yml)
        if not claim:
            # Pas de sess_id extractible → fichier ignoré
            print(f" ⚠️ SKIP {yml.name} — pas de sess_id")
            skipped += 1
            continue
        if claim['status'] == 'stale':
            stale_marked += 1
        if dry_run:
            print(f" • {claim['sess_id']} | {claim['status']} | {claim['scope'][:40]}")
            migrated += 1
            continue
        try:
            # INSERT OR IGNORE : rejouer la migration ne duplique rien
            conn.execute("""
            INSERT OR IGNORE INTO claims
            (sess_id, type, scope, status, opened_at, handoff_level,
            story_angle, parent_sess, satellite_type, satellite_level,
            theme_branch, zone, mode, ttl_hours)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """, (
                claim['sess_id'], claim['type'], claim['scope'],
                claim['status'], claim['opened_at'], claim['handoff_level'],
                claim['story_angle'], claim['parent_sess'],
                claim['satellite_type'], claim['satellite_level'],
                claim['theme_branch'], claim['zone'], claim['mode'],
                claim['ttl_hours'],
            ))
            migrated += 1
        except Exception as e:
            # Un claim cassé ne doit pas interrompre le lot entier
            print(f" ❌ ERROR {yml.name} : {e}")
            errors += 1
    conn.commit()
    conn.close()
    print(f"\n✅ Migration terminée :")
    print(f" Migrés : {migrated}")
    print(f" Skippés : {skipped}")
    print(f" Stale : {stale_marked} (open > 4h → marqués stale)")
    print(f" Erreurs : {errors}")
    # Archivage post-migration (jamais en dry-run)
    if archive and not dry_run:
        ARCHIVE_DIR.mkdir(parents=True, exist_ok=True)
        for yml in yml_files:
            shutil.move(str(yml), str(ARCHIVE_DIR / yml.name))
        print(f"\n📁 {len(yml_files)} fichiers archivés → {ARCHIVE_DIR}")
        print(" → Ajouter 'claims/' à .gitignore pour finaliser")

View File

@@ -1,7 +1,7 @@
#!/bin/bash
# preflight-check.sh — BSI-v3-8 Pre-flight check
# preflight-check.sh — BSI-v3-8 Pre-flight check (ADR-036 : brain.db)
# Valide les 6 conditions avant qu'un satellite commence à écrire.
# Soft-lock kernel : tout satellite hors scope kernel est bloqué sur zone:kernel.
# Source : tables claims, locks, circuit_breaker dans brain.db
#
# Usage :
# preflight-check.sh check <sess_id> <filepath> → 6 checks, exit 0 = go
@@ -21,14 +21,29 @@
set -euo pipefail
BRAIN_ROOT="$(git -C "$(dirname "$0")" rev-parse --show-toplevel)"
CLAIMS_DIR="$BRAIN_ROOT/claims"
LOCKS_DIR="$BRAIN_ROOT/locks"
FAILS_DIR="$BRAIN_ROOT/locks/fails"
DB_PATH="$BRAIN_ROOT/brain.db"
# Chemins zone:kernel — synchronisés avec KERNEL.md + brain-index-regen.sh
# Chemins zone:kernel — synchronisés avec KERNEL.md
KERNEL_SCOPES="agents/ profil/ scripts/ KERNEL.md CLAUDE.md PATHS.md brain-compose.yml brain-constitution.md BRAIN-INDEX.md"
mkdir -p "$FAILS_DIR"
# Init tables si absentes
python3 "$BRAIN_ROOT/scripts/bsi-db.py" -script "
CREATE TABLE IF NOT EXISTS circuit_breaker (
sess_id TEXT PRIMARY KEY,
fail_count INTEGER NOT NULL DEFAULT 0,
last_fail_at TEXT,
updated_at TEXT NOT NULL DEFAULT (datetime('now'))
);
"
# Helper : query brain.db (SELECT → stdout)
q() {
python3 "$BRAIN_ROOT/scripts/bsi-db.py" "$1"
}
# Helper : write brain.db (INSERT/UPDATE/DELETE)
qw() {
python3 "$BRAIN_ROOT/scripts/bsi-db.py" -exec "$1"
}
# Détermine si un filepath est zone:kernel
is_kernel_path() {
@@ -59,20 +74,16 @@ cmd_check() {
local sess_id="$1"
local filepath="$2"
local claim_file="$CLAIMS_DIR/${sess_id}.yml"
local fail_count=0
local all_ok=true
echo "🛫 PRE-FLIGHT — $sess_id$filepath"
echo ""
# CHECK 1 — Claim status
if [ ! -f "$claim_file" ]; then
local claim_status
claim_status=$(q "SELECT status FROM claims WHERE sess_id = '$sess_id';")
if [ -z "$claim_status" ]; then
echo "❌ CHECK 1 — Claim introuvable : $sess_id"
exit 4
fi
local claim_status
claim_status=$(grep '^status:' "$claim_file" | sed 's/^[^:]*: *//' | tr -d '"' | head -1)
if [ "$claim_status" = "paused" ]; then
echo "❌ CHECK 1 — Claim en pause : $sess_id"
echo " → human-gate-ack.sh resume $sess_id"
@@ -91,28 +102,25 @@ cmd_check() {
# CHECK 1b — Cascade pause (parent paused = enfant bloqué)
local parent_id
parent_id=$(grep '^parent_satellite:' "$claim_file" | sed 's/^[^:]*: *//' | tr -d '"' 2>/dev/null || echo "")
parent_id=$(q "SELECT parent_sess FROM claims WHERE sess_id = '$sess_id';")
if [ -n "$parent_id" ]; then
local parent_file="$CLAIMS_DIR/${parent_id}.yml"
if [ -f "$parent_file" ]; then
local parent_status
parent_status=$(grep '^status:' "$parent_file" | sed 's/^[^:]*: *//' | tr -d '"' | head -1)
if [ "$parent_status" = "paused" ]; then
echo "❌ CHECK 1b — Parent en pause : $parent_id"
echo " → human-gate-ack.sh resume $parent_id"
exit 4
fi
if [ "$parent_status" = "failed" ]; then
echo "❌ CHECK 1b — Parent failed : $parent_id — satellite orphelin"
exit 4
fi
local parent_status
parent_status=$(q "SELECT status FROM claims WHERE sess_id = '$parent_id';")
if [ "$parent_status" = "paused" ]; then
echo "❌ CHECK 1b — Parent en pause : $parent_id"
echo " → human-gate-ack.sh resume $parent_id"
exit 4
fi
if [ "$parent_status" = "failed" ]; then
echo "❌ CHECK 1b — Parent failed : $parent_id — satellite orphelin"
exit 4
fi
echo "✅ CHECK 1b — Parent ok"
fi
[ -n "$parent_id" ] && echo "✅ CHECK 1b — Parent ok" || true
# CHECK 2 — Scope check
local claim_scope
claim_scope=$(grep '^scope:' "$claim_file" | sed 's/^[^:]*: *//' | tr -d '"')
claim_scope=$(q "SELECT scope FROM claims WHERE sess_id = '$sess_id';")
local scope_ok=false
for scope_entry in $claim_scope; do
if [[ "$filepath" == ${scope_entry}* ]] || [[ "$filepath" == "$scope_entry" ]]; then
@@ -127,8 +135,6 @@ cmd_check() {
echo "✅ CHECK 2 — Scope ok"
# CHECK 3 — Zone check (soft lock kernel)
# Un satellite dont le scope n'est pas kernel ne peut pas écrire en zone:kernel.
# Exception : kerneluser:true → WARNING (pas de blocage) — owner confirme lui-même.
if is_kernel_path "$filepath"; then
if ! scope_is_kernel "$claim_scope"; then
local kerneluser
@@ -139,7 +145,6 @@ cmd_check() {
else
echo "❌ CHECK 3 — Zone violation : $filepath est zone:kernel"
echo " Scope déclaré [$claim_scope] n'inclut pas de zone:kernel"
echo " → Modification kernel = décision humaine (KERNEL.md règle délégation)"
exit 5
fi
fi
@@ -149,28 +154,26 @@ cmd_check() {
fi
# CHECK 4 — Lock check
local lockname
lockname=$(echo "$filepath" | sed 's|/|-|g' | sed 's|\.|-|g' | sed 's|^-||')
local lockfile="$LOCKS_DIR/${lockname}.lock"
if [ -f "$lockfile" ]; then
local now existing_holder existing_expires existing_epoch
now=$(date +%s)
existing_holder=$(grep '^holder:' "$lockfile" | sed 's/^[^:]*: *//')
existing_expires=$(grep '^expires_at:' "$lockfile" | sed 's/^[^:]*: *//')
existing_epoch=$(date -d "$existing_expires" +%s 2>/dev/null \
|| date -j -f "%Y-%m-%dT%H:%M" "$existing_expires" +%s 2>/dev/null || echo 0)
if [ "$now" -lt "$existing_epoch" ] && [ "$existing_holder" != "$sess_id" ]; then
echo "❌ CHECK 4 — Fichier locké par : $existing_holder (expire : $existing_expires)"
exit 2
fi
local lock_holder
lock_holder=$(q "
SELECT holder FROM locks
WHERE filepath = '$filepath'
AND julianday('now') < julianday(expires_at)
AND holder != '$sess_id'
LIMIT 1;
")
if [ -n "$lock_holder" ]; then
local lock_expires
lock_expires=$(q "SELECT expires_at FROM locks WHERE filepath = '$filepath';")
echo "❌ CHECK 4 — Fichier locké par : $lock_holder (expire : $lock_expires)"
exit 2
fi
echo "✅ CHECK 4 — Lock ok"
# CHECK 5 — Circuit breaker
local fail_count_file="$FAILS_DIR/${sess_id}.count"
if [ -f "$fail_count_file" ]; then
fail_count=$(cat "$fail_count_file")
fi
local fail_count
fail_count=$(q "SELECT COALESCE(fail_count, 0) FROM circuit_breaker WHERE sess_id = '$sess_id';")
fail_count="${fail_count:-0}"
local max_fails
max_fails=$(grep -A5 'circuit_breaker:' "$BRAIN_ROOT/brain-compose.yml" \
| grep 'max_consecutive_fails:' | sed 's/^[^:]*: *//' | awk '{print $1}' | head -1 2>/dev/null || echo 3)
@@ -183,7 +186,7 @@ cmd_check() {
# CHECK 6 — Theme branch
local theme_branch
theme_branch=$(grep '^theme_branch:' "$claim_file" | sed 's/^[^:]*: *//' | tr -d '"' 2>/dev/null || echo "")
theme_branch=$(q "SELECT COALESCE(theme_branch, '') FROM claims WHERE sess_id = '$sess_id';")
if [ -n "$theme_branch" ]; then
local current_branch
current_branch=$(git -C "$BRAIN_ROOT" branch --show-current 2>/dev/null || echo "")
@@ -202,17 +205,22 @@ cmd_check() {
# --- FAIL (circuit breaker increment) ---
cmd_fail() {
local sess_id="$1"
local fail_count_file="$FAILS_DIR/${sess_id}.count"
local count=0
[ -f "$fail_count_file" ] && count=$(cat "$fail_count_file")
count=$((count + 1))
echo "$count" > "$fail_count_file"
qw "
INSERT INTO circuit_breaker (sess_id, fail_count, last_fail_at, updated_at)
VALUES ('$sess_id', 1, datetime('now'), datetime('now'))
ON CONFLICT(sess_id) DO UPDATE SET
fail_count = fail_count + 1,
last_fail_at = datetime('now'),
updated_at = datetime('now')
"
local fail_count
fail_count=$(q "SELECT fail_count FROM circuit_breaker WHERE sess_id = '$sess_id';")
local max_fails
max_fails=$(grep -A5 'circuit_breaker:' "$BRAIN_ROOT/brain-compose.yml" \
| grep 'max_consecutive_fails:' | sed 's/^[^:]*: *//' | awk '{print $1}' | head -1 2>/dev/null || echo 3)
echo "⚠️ Fail enregistré : $count/$max_fails ($sess_id)"
if [ "$count" -ge "$max_fails" ] 2>/dev/null; then
echo "⚠️ Fail enregistré : $fail_count/$max_fails ($sess_id)"
if [ "$fail_count" -ge "$max_fails" ] 2>/dev/null; then
echo "🔴 Circuit breaker déclenché — signal BLOCKED_ON pilote"
fi
}
@@ -220,24 +228,23 @@ cmd_fail() {
# --- RESET (après succès) ---
cmd_reset() {
local sess_id="$1"
local fail_count_file="$FAILS_DIR/${sess_id}.count"
rm -f "$fail_count_file"
qw "DELETE FROM circuit_breaker WHERE sess_id = '$sess_id'"
echo "✅ Circuit breaker reset : $sess_id"
}
# --- STATUS ---
cmd_status() {
local sess_id="$1"
local fail_count_file="$FAILS_DIR/${sess_id}.count"
local count=0
[ -f "$fail_count_file" ] && count=$(cat "$fail_count_file")
local fail_count
fail_count=$(q "SELECT COALESCE(fail_count, 0) FROM circuit_breaker WHERE sess_id = '$sess_id';")
fail_count="${fail_count:-0}"
local max_fails
max_fails=$(grep -A5 'circuit_breaker:' "$BRAIN_ROOT/brain-compose.yml" \
| grep 'max_consecutive_fails:' | sed 's/^[^:]*: *//' | awk '{print $1}' | head -1 2>/dev/null || echo 3)
if [ "$count" -ge "$max_fails" ] 2>/dev/null; then
echo "🔴 Circuit breaker déclenché : $count/$max_fails ($sess_id)"
if [ "$fail_count" -ge "$max_fails" ] 2>/dev/null; then
echo "🔴 Circuit breaker déclenché : $fail_count/$max_fails ($sess_id)"
else
echo "✅ Circuit breaker ok : $count/$max_fails ($sess_id)"
echo "✅ Circuit breaker ok : $fail_count/$max_fails ($sess_id)"
fi
}

View File

@@ -0,0 +1,60 @@
#!/bin/bash
# sync-secrets-from-vps.sh — Migration one-shot : VPS .env → BrainSecrets/MYSECRETS
# Usage : bash scripts/sync-secrets-from-vps.sh
# Lancer depuis le terminal directement (jamais via Claude)
# Les valeurs ne sont jamais affichées — injection silencieuse
set -e
# Coordonnées VPS lues depuis MYSECRETS lui-même (le fichier sert aussi de config)
MYSECRETS="$HOME/Dev/BrainSecrets/MYSECRETS"
VPS_USER=$(grep '^VPS_USER=' "$MYSECRETS" | cut -d= -f2-)
VPS_IP=$(grep '^VPS_IP=' "$MYSECRETS" | cut -d= -f2-)
# Garde-fou : impossible de continuer sans cible ssh
if [[ -z "$VPS_USER" || -z "$VPS_IP" ]]; then
  echo "❌ VPS_USER ou VPS_IP manquant dans MYSECRETS"
  exit 1
fi
echo "✅ VPS détecté : $VPS_USER@$VPS_IP"
echo ""
# inject PREFIX KEY VALUE — upsert "PREFIXKEY=VALUE" dans $MYSECRETS.
# - valeur vide → no-op (on ne crée jamais de clé vide)
# - clé déjà présente → remplacement in place, sinon append en fin de fichier
# FIX : la valeur était interpolée brute dans le remplacement sed "s|…|…|" —
#       toute valeur contenant '|', '&' ou '\' cassait (ou détournait) la
#       substitution. On échappe désormais ces caractères. echo → printf pour
#       les valeurs commençant par '-'.
inject() {
  local prefix="$1"
  local key="$2"
  local val="$3"
  local full_key="${prefix}${key}"
  [[ -z "$val" ]] && return
  if grep -q "^${full_key}=" "$MYSECRETS"; then
    # Échappe '\', '&' et le délimiteur '|' pour la partie remplacement de sed
    local esc
    esc=$(printf '%s' "$val" | sed 's/[\\&|]/\\&/g')
    sed -i "s|^${full_key}=.*|${full_key}=${esc}|" "$MYSECRETS"
  else
    printf '%s=%s\n' "$full_key" "$val" >> "$MYSECRETS"
  fi
}
# ── TetaRdPG ──────────────────────────────────────────────────────────────────
echo "→ TetaRdPG .env..."
# Lit le .env distant ligne par ligne : IFS='=' coupe sur le premier '=',
# 'val' conserve le reste (y compris d'éventuels '=' internes).
# Ignore lignes vides, commentaires (#...) et valeurs vides.
while IFS='=' read -r key val; do
  [[ -z "$key" || "$key" =~ ^# || -z "$val" ]] && continue
  inject "TETARDPG_" "$key" "$val"
done < <(ssh "${VPS_USER}@${VPS_IP}" "cat /home/tetardtek/gitea/TetaRdPG/.env 2>/dev/null")
echo " ✅ TETARDPG_* injectées"
# ── OriginsDigital ────────────────────────────────────────────────────────────
echo "→ OriginsDigital .env..."
while IFS='=' read -r key val; do
  [[ -z "$key" || "$key" =~ ^# || -z "$val" ]] && continue
  inject "ORIGINSDIGITAL_" "$key" "$val"
done < <(ssh "${VPS_USER}@${VPS_IP}" "cat /var/www/originsdigital/backend/.env 2>/dev/null")
echo " ✅ ORIGINSDIGITAL_* injectées"
# ── MySQL root ────────────────────────────────────────────────────────────────
# Extrait MYSQL_ROOT_PASSWORD de l'env du conteneur docker mysql-prod
echo "→ MySQL root password..."
mysql_root=$(ssh "${VPS_USER}@${VPS_IP}" "docker inspect mysql-prod --format '{{range .Config.Env}}{{println .}}{{end}}' 2>/dev/null | grep MYSQL_ROOT_PASSWORD | cut -d= -f2-")
inject "" "MYSQL_ROOT_PASSWORD" "$mysql_root"
echo " ✅ MYSQL_ROOT_PASSWORD injectée"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "✅ Consolidation terminée — vérifie BrainSecrets/MYSECRETS"
echo " cd ~/Dev/BrainSecrets && git add MYSECRETS && git commit -m 'feat(secrets): consolidation VPS .env' && git push"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

124
scripts/sync-template.sh Executable file
View File

@@ -0,0 +1,124 @@
#!/bin/bash
# sync-template.sh — Synchronise brain/ → brain-template/
# Copie les fichiers kernel en excluant tout ce qui est instance/personnel.
# À lancer après chaque modification kernel significative.
#
# Usage :
# sync-template.sh → sync + rapport
# sync-template.sh --dry → rapport sans écrire
# sync-template.sh --push → sync + commit + push
set -euo pipefail
BRAIN_ROOT="$(git -C "$(dirname "$0")" rev-parse --show-toplevel)"
TEMPLATE_DIR="$BRAIN_ROOT/brain-template"
DRY="${1:-}"
PUSH=""
[ "$DRY" = "--push" ] && PUSH=true && DRY=""
if [ ! -d "$TEMPLATE_DIR/.git" ]; then
echo "❌ brain-template/ introuvable ou pas un repo git"
exit 1
fi
echo "🔄 Sync brain → brain-template"
[ -n "$DRY" ] && echo " (dry run — aucune écriture)"
echo ""
# --- Scripts : tout sauf distillation/privé ---
SCRIPTS_EXCLUDE="bsi-server.sh bsi-rag.sh bsi-search.sh brain-bot.py brain-engine.service get-telegram-chatid.sh get-telegram-chatids.sh rotate-oauth-secrets.sh brain-key-server.py brain-key-admin.sh key-guardian.sh"
echo "── scripts/ ────────────────────────────────────"
for f in "$BRAIN_ROOT/scripts/"*.sh "$BRAIN_ROOT/scripts/"*.py; do
[ -f "$f" ] || continue
base=$(basename "$f")
skip=false
for ex in $SCRIPTS_EXCLUDE; do [ "$base" = "$ex" ] && skip=true; done
if [ "$skip" = true ]; then
echo "$base (exclu)"
continue
fi
if [ -z "$DRY" ]; then
cp "$f" "$TEMPLATE_DIR/scripts/"
fi
echo "$base"
done
# --- Agents : tout sauf reviews/ ---
echo ""
echo "── agents/ ─────────────────────────────────────"
if [ -z "$DRY" ]; then
rsync -a --delete --exclude='reviews/' --exclude='bact-scribe.md' \
"$BRAIN_ROOT/agents/" "$TEMPLATE_DIR/agents/"
fi
agent_count=$(ls "$BRAIN_ROOT/agents/"*.md 2>/dev/null | wc -l | tr -d ' ')
echo "$agent_count agents (reviews/ exclu)"
# --- Fichiers kernel racine ---
echo ""
echo "── kernel racine ───────────────────────────────"
KERNEL_FILES="KERNEL.md brain-compose.yml brain-constitution.md"
for f in $KERNEL_FILES; do
if [ -f "$BRAIN_ROOT/$f" ]; then
[ -z "$DRY" ] && cp "$BRAIN_ROOT/$f" "$TEMPLATE_DIR/$f"
echo "$f"
fi
done
# --- Workflows ---
echo ""
echo "── workflows/ ──────────────────────────────────"
if [ -d "$BRAIN_ROOT/workflows" ]; then
if [ -z "$DRY" ]; then
mkdir -p "$TEMPLATE_DIR/workflows"
cp "$BRAIN_ROOT/workflows/_template.yml" "$TEMPLATE_DIR/workflows/" 2>/dev/null || true
cp "$BRAIN_ROOT/workflows/brain-engine.yml" "$TEMPLATE_DIR/workflows/" 2>/dev/null || true
fi
echo " ✅ _template.yml + brain-engine.yml"
fi
# --- Wiki (submodule) ---
echo ""
echo "── wiki/ ───────────────────────────────────────"
WIKI_FILES="multi-instance.md concepts.md patterns.md vocabulary.md session-lifecycle.md cold-start.md"
if [ -d "$BRAIN_ROOT/wiki" ]; then
if [ -z "$DRY" ]; then
mkdir -p "$TEMPLATE_DIR/wiki"
for wf in $WIKI_FILES; do
[ -f "$BRAIN_ROOT/wiki/$wf" ] && cp "$BRAIN_ROOT/wiki/$wf" "$TEMPLATE_DIR/wiki/" && echo "$wf"
done
else
echo " (dry) wiki/$WIKI_FILES"
fi
fi
# --- Gitkeep ---
[ -z "$DRY" ] && mkdir -p "$TEMPLATE_DIR/locks" && \
touch "$TEMPLATE_DIR/locks/.gitkeep"
# --- Isolation check ---
echo ""
echo "── kernel-isolation-check ──────────────────────"
if [ -z "$DRY" ]; then
result=$(bash "$BRAIN_ROOT/scripts/kernel-isolation-check.sh" 2>&1 | tail -3)
echo "$result"
fi
# --- Push ---
if [ -n "$PUSH" ]; then
echo ""
echo "── commit + push ───────────────────────────────"
cd "$TEMPLATE_DIR"
if git diff --quiet && git diff --staged --quiet; then
echo " Aucune modification à commiter"
else
version=$(grep '^version:' "$BRAIN_ROOT/brain-compose.yml" | head -1 | sed 's/version: "//;s/"//')
git add -A
git commit -m "sync: kernel v$version → template"
git push
echo " ✅ Pushé"
fi
fi
echo ""
echo "✅ Sync terminé"

View File

@@ -38,14 +38,13 @@ echo ""
BLOCKERS=()
# --- Check 1 : aucun claim open sur cette branche ---
OPEN_CLAIMS=$(grep -rl "status: open" "$BRAIN_ROOT/claims/" 2>/dev/null || true)
if [ -n "$OPEN_CLAIMS" ]; then
while IFS= read -r claim; do
# Vérifier si le claim référence ce thème ou n'a pas de theme_branch (ambigu)
rel="${claim#$BRAIN_ROOT/}"
BLOCKERS+=(" 🔴 Claim encore ouvert : $rel")
done <<< "$OPEN_CLAIMS"
# --- Check 1 : aucun claim open (ADR-042 — brain.db source unique) ---
OPEN_COUNT=$(bash "$BRAIN_ROOT/scripts/bsi-query.sh" count-open 2>/dev/null || echo "0")
if [ "$OPEN_COUNT" -gt 0 ]; then
OPEN_LIST=$(bash "$BRAIN_ROOT/scripts/bsi-query.sh" open 2>/dev/null || true)
while IFS= read -r line; do
BLOCKERS+=(" 🔴 Claim ouvert : $line")
done <<< "$OPEN_LIST"
fi
# --- Check 2 : aucun signal BLOCKED_ON pending ---

View File

@@ -7,7 +7,7 @@
# bash scripts/workflow-launch.sh <workflow.yml> --step N # step spécifique
# bash scripts/workflow-launch.sh <workflow.yml> --status # état de la chaîne
#
# Le claim généré est affiché + écrit dans claims/ — l'humain lance le satellite.
# Le claim est écrit dans brain.db (ADR-042) — l'humain lance le satellite.
# (Futur : kernel-orchestrator lancera automatiquement — BSI-v3-9)
set -euo pipefail
@@ -53,20 +53,26 @@ echo "📋 Workflow : $THEME_NAME"
echo " Branche : $THEME_BRANCH"
echo ""
# --- Mode status : afficher l'état de la chaîne ---
# --- Mode status : afficher l'état de la chaîne (brain.db — ADR-042) ---
if [ "$MODE" = "status" ]; then
echo "État des claims pour ce thème :"
echo ""
# Trouver les claims qui référencent ce theme_branch
for claim in "$BRAIN_ROOT/claims/"sess-*.yml; do
if grep -q "theme_branch: $THEME_BRANCH" "$claim" 2>/dev/null; then
sess_id=$(grep '^sess_id:' "$claim" | sed 's/sess_id: *//')
status=$(grep '^status:' "$claim" | sed 's/status: *//')
step=$(grep '^workflow_step:' "$claim" 2>/dev/null | sed 's/workflow_step: *//' || echo "?")
result_status=$(grep 'status:' "$claim" | grep -v '^status:' | head -1 | sed 's/.*status: *//' || echo "-")
echo " Step $step$sess_id [$status] result:$result_status"
fi
done
python3 -c "
import sqlite3, sys
conn = sqlite3.connect('$BRAIN_ROOT/brain.db')
conn.row_factory = sqlite3.Row
rows = conn.execute(
'SELECT sess_id, status, workflow_step, result_status FROM claims WHERE theme_branch = ? ORDER BY workflow_step',
('$THEME_BRANCH',)
).fetchall()
conn.close()
if not rows:
print(' (aucun claim pour ce thème)')
for r in rows:
step = r['workflow_step'] or '?'
result = r['result_status'] or '-'
print(f\" Step {step} — {r['sess_id']} [{r['status']}] result:{result}\")
" 2>/dev/null || echo " ⚠️ brain.db inaccessible"
exit 0
fi
@@ -130,19 +136,17 @@ fi
if [ -n "$TARGET_STEP" ]; then
STEP_IDX=$((TARGET_STEP - 1))
else
# Trouver le dernier step complété via les claims
LAST_DONE=0
for claim in "$BRAIN_ROOT/claims/"sess-*.yml; do
if grep -q "theme_branch: $THEME_BRANCH" "$claim" 2>/dev/null; then
if grep -q "status: closed" "$claim" 2>/dev/null; then
claim_step=$(grep '^workflow_step:' "$claim" 2>/dev/null \
| sed 's/workflow_step: *//' || echo "0")
if [ "$claim_step" -gt "$LAST_DONE" ] 2>/dev/null; then
LAST_DONE="$claim_step"
fi
fi
fi
done
# Trouver le dernier step complété via brain.db (ADR-042)
LAST_DONE=$(python3 -c "
import sqlite3
conn = sqlite3.connect('$BRAIN_ROOT/brain.db')
r = conn.execute(
'SELECT MAX(workflow_step) FROM claims WHERE theme_branch = ? AND status = ?',
('$THEME_BRANCH', 'closed')
).fetchone()
conn.close()
print(r[0] if r[0] is not None else 0)
" 2>/dev/null || echo 0)
STEP_IDX=$LAST_DONE
fi
@@ -188,29 +192,25 @@ fi
DATETIME=$(date +%Y%m%d-%H%M)
SCOPE_SLUG=$(echo "$STEP_SCOPE" | tr '/' '-' | sed 's/-$//' | tr '[:upper:]' '[:lower:]')
SESS_ID="sess-${DATETIME}-${THEME_NAME}-step${STEP_NUM}"
CLAIM_FILE="$BRAIN_ROOT/claims/${SESS_ID}.yml"
# Écrire le claim
cat > "$CLAIM_FILE" << EOF
sess_id: $SESS_ID
type: satellite
scope: $STEP_SCOPE
agent: satellite-boot
status: open
opened_at: "$(date +%Y-%m-%dT%H:%M)"
handoff_level: 0
story_angle: "$STEP_ANGLE"
satellite_type: $STEP_TYPE
satellite_level: leaf
parent_satellite: ~
theme_branch: $THEME_BRANCH
workflow: $THEME_NAME
workflow_step: $STEP_NUM
on_done: $ON_DONE
on_fail: $ON_FAIL
EOF
# Écrire le claim dans brain.db (ADR-042 — source unique)
bash "$BRAIN_ROOT/scripts/bsi-claim.sh" open "$SESS_ID" \
--scope "$STEP_SCOPE" --type "satellite" --zone "project" \
--story "$STEP_ANGLE" --mode "$STEP_TYPE"
# Enrichir avec les champs workflow spécifiques
python3 -c "
import sqlite3
conn = sqlite3.connect('$BRAIN_ROOT/brain.db')
conn.execute('''
UPDATE claims SET satellite_type = ?, satellite_level = 'leaf',
theme_branch = ?, workflow = ?, workflow_step = ?
WHERE sess_id = ?
''', ('$STEP_TYPE', '$THEME_BRANCH', '$THEME_NAME', $STEP_NUM, '$SESS_ID'))
conn.commit()
conn.close()
" 2>/dev/null
echo "✅ Claim généré : claims/${SESS_ID}.yml"
echo ""
echo " Step : $STEP_NUM / $TOTAL_STEPS"
echo " Type : $STEP_TYPE"
@@ -221,6 +221,3 @@ echo " Gate : $STEP_GATE"
fi
echo " On done : $ON_DONE"
echo " On fail : $ON_FAIL"
echo ""
echo "→ Commiter le claim :"
echo " git add claims/${SESS_ID}.yml && git commit -m \"bsi: open satellite ${SESS_ID}\""