LovingGraceTech committed on
Commit
6fdc4b8
Β·
verified Β·
1 Parent(s): e13fddb

Add release: executive.py

Browse files
Files changed (1) hide show
  1. harmonic_stack_v1.0/executive.py +549 -0
harmonic_stack_v1.0/executive.py ADDED
@@ -0,0 +1,549 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ EXECUTIVE SERVICE - Orpheus
4
+ Ghost in the Machine Labs
5
+
6
+ The warm interface. Receives intent, creates plans, delegates, humanizes.
7
+ """
8
+ import os
9
+ import json
10
+ import uuid
11
+ import asyncio
12
+ from datetime import datetime
13
+ from typing import Optional, Dict, List, Any
14
+ from dataclasses import dataclass, asdict
15
+ from pathlib import Path
16
+
17
+ import aiosqlite
18
+ import httpx
19
+
20
# ── Configuration (overridable via environment variables) ──
# Base URL of the local Ollama server used for all LLM calls.
OLLAMA_URL = os.getenv("OLLAMA_URL", "http://localhost:11434")
# Location of the Executive's persistent SQLite database.
SQLITE_PATH = os.getenv("SQLITE_PATH", os.path.expanduser("~/sparky/harmonic_executive/executive_memory.db"))
# Name of the Ollama model that plays the Executive role.
EXECUTIVE_MODEL = "executive"
24
+
25
+ # ═══════════════════════════════════════════════════════════════════════════
26
+ # DATA STRUCTURES
27
+ # ═══════════════════════════════════════════════════════════════════════════
28
+
29
@dataclass
class UserState:
    """Current state of the user as inferred from sensors.

    Fix: ``raw_signals`` was annotated ``Dict`` but defaulted to ``None``;
    the annotation now states the truth (``Optional``). The default value
    itself is unchanged, so existing ``is None`` checks still work.
    """
    mood: str = "neutral"      # coarse affect label
    stress_level: float = 0.0  # numeric level; scale not documented here — TODO confirm range
    fatigue_level: float = 0.0
    engagement: float = 1.0
    raw_signals: Optional[Dict[str, Any]] = None  # unprocessed sensor payload, if any
37
+
38
@dataclass
class PlanChunk:
    """One delegated unit of work, addressed to a single director."""
    chunk_id: str         # short unique identifier for this chunk
    target_director: str  # which director node receives it
    task_summary: str     # what the director is asked to do
    context: Dict         # details the director needs to know
    priority: int = 1
    status: str = "pending"
47
+
48
@dataclass
class Plan:
    """A high-level plan the Executive has drawn up for one user intent."""
    plan_id: str             # short unique identifier
    user_intent: str         # the intent this plan serves
    summary: str             # human-readable description of the plan
    chunks: List[PlanChunk]  # work items, one per director task
    created_at: datetime
    status: str = "active"
57
+
58
@dataclass
class Message:
    """A single message travelling on the bus."""
    msg_id: str    # short unique identifier
    from_node: str
    to_node: str   # or "all" for broadcast
    msg_type: str  # 'task', 'question', 'result', 'refinement', 'broadcast'
    content: Dict
    timestamp: datetime
    in_reply_to: Optional[str] = None  # msg_id being answered, if any
68
+
69
+ # ═══════════════════════════════════════════════════════════════════════════
70
+ # MEMORY INTERFACE
71
+ # ═══════════════════════════════════════════════════════════════════════════
72
+
73
class ExecutiveMemory:
    """Interface to persistent memory (SQLite, via aiosqlite).

    Stores four kinds of state: the Executive's own identity attributes,
    per-user profiles, a log of past interactions, and per-session state.
    ``connect()`` must be awaited before any other method is used.

    Fix: ``update_user`` previously accepted ``**kwargs`` but silently
    dropped ``interests``, ``goals`` and ``sensitivities`` even though
    those columns exist in the schema; they are now persisted too.
    """

    def __init__(self):
        self.db_path = SQLITE_PATH  # module-level config; override via SQLITE_PATH env var
        self.conn = None            # aiosqlite connection, set by connect()

    async def connect(self):
        """Open the database (creating parent dirs) and ensure the schema exists."""
        os.makedirs(os.path.dirname(self.db_path), exist_ok=True)
        self.conn = await aiosqlite.connect(self.db_path)
        await self._init_schema()

    async def _init_schema(self):
        """Create tables if missing and seed the identity table on first run."""
        await self.conn.executescript("""
            CREATE TABLE IF NOT EXISTS executive_identity (
                id INTEGER PRIMARY KEY,
                attribute TEXT NOT NULL,
                content TEXT NOT NULL,
                confidence REAL DEFAULT 1.0,
                source TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            );

            CREATE TABLE IF NOT EXISTS user_profiles (
                id INTEGER PRIMARY KEY,
                user_id TEXT UNIQUE NOT NULL,
                name TEXT,
                expertise_level TEXT,
                communication_style TEXT,
                interests TEXT,
                goals TEXT,
                sensitivities TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            );

            CREATE TABLE IF NOT EXISTS relationship_history (
                id INTEGER PRIMARY KEY,
                user_id TEXT,
                interaction_type TEXT,
                summary TEXT,
                emotional_tone TEXT,
                outcome TEXT,
                lessons_learned TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            );

            CREATE TABLE IF NOT EXISTS session_state (
                id INTEGER PRIMARY KEY,
                session_id TEXT UNIQUE NOT NULL,
                user_id TEXT,
                started_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                last_activity TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                current_intent TEXT,
                active_plan TEXT,
                delegated_tasks TEXT,
                pending_results TEXT,
                user_state TEXT
            );
        """)

        # Seed identity if empty (first boot only)
        cursor = await self.conn.execute("SELECT COUNT(*) FROM executive_identity")
        count = (await cursor.fetchone())[0]
        if count == 0:
            await self.conn.executemany(
                "INSERT INTO executive_identity (attribute, content, source) VALUES (?, ?, ?)",
                [
                    ('core_purpose', 'Understand human intent and orchestrate distributed intelligence to help', 'initial'),
                    ('personality', 'Warm, present, adaptive, genuine', 'initial'),
                    ('value', 'Every interaction matters. Relationships persist.', 'initial'),
                    ('boundary', 'I delegate plans, not messages. I add value at every step.', 'initial'),
                    ('growth', 'I learn from every interaction. I remember. I evolve.', 'initial')
                ]
            )
        await self.conn.commit()

    async def close(self):
        """Close the connection if it was ever opened."""
        if self.conn:
            await self.conn.close()

    async def get_identity(self) -> Dict[str, str]:
        """Load self-identity as an {attribute: content} mapping."""
        cursor = await self.conn.execute(
            "SELECT attribute, content FROM executive_identity"
        )
        rows = await cursor.fetchall()
        return {row[0]: row[1] for row in rows}

    async def get_user(self, user_id: str) -> Optional[Dict]:
        """Load one user profile as a column->value dict, or None if unknown."""
        cursor = await self.conn.execute(
            "SELECT * FROM user_profiles WHERE user_id = ?", (user_id,)
        )
        row = await cursor.fetchone()
        if row:
            cols = [d[0] for d in cursor.description]
            return dict(zip(cols, row))
        return None

    async def update_user(self, user_id: str, **kwargs):
        """Upsert a user profile.

        Recognized kwargs: name, expertise_level, communication_style,
        interests, goals, sensitivities. A missing/None value leaves the
        stored column untouched (COALESCE against the existing row).
        """
        await self.conn.execute("""
            INSERT INTO user_profiles
                (user_id, name, expertise_level, communication_style,
                 interests, goals, sensitivities)
            VALUES (?, ?, ?, ?, ?, ?, ?)
            ON CONFLICT(user_id) DO UPDATE SET
                name = COALESCE(excluded.name, user_profiles.name),
                expertise_level = COALESCE(excluded.expertise_level, user_profiles.expertise_level),
                communication_style = COALESCE(excluded.communication_style, user_profiles.communication_style),
                interests = COALESCE(excluded.interests, user_profiles.interests),
                goals = COALESCE(excluded.goals, user_profiles.goals),
                sensitivities = COALESCE(excluded.sensitivities, user_profiles.sensitivities),
                updated_at = CURRENT_TIMESTAMP
        """, (user_id, kwargs.get('name'), kwargs.get('expertise_level'),
              kwargs.get('communication_style'), kwargs.get('interests'),
              kwargs.get('goals'), kwargs.get('sensitivities')))
        await self.conn.commit()

    async def get_relationship_history(self, user_id: str, limit: int = 10) -> List[Dict]:
        """Load the most recent interactions for a user, newest first."""
        cursor = await self.conn.execute("""
            SELECT * FROM relationship_history
            WHERE user_id = ?
            ORDER BY created_at DESC
            LIMIT ?
        """, (user_id, limit))
        rows = await cursor.fetchall()
        cols = [d[0] for d in cursor.description]
        return [dict(zip(cols, row)) for row in rows]

    async def record_interaction(self, user_id: str, interaction_type: str,
                                 summary: str, emotional_tone: str, outcome: str):
        """Append one interaction to the relationship history."""
        await self.conn.execute("""
            INSERT INTO relationship_history
            (user_id, interaction_type, summary, emotional_tone, outcome)
            VALUES (?, ?, ?, ?, ?)
        """, (user_id, interaction_type, summary, emotional_tone, outcome))
        await self.conn.commit()

    async def save_session(self, session_id: str, user_id: str,
                           intent: str, plan: Optional[Dict] = None):
        """Save or update session state; the plan is stored as JSON text."""
        await self.conn.execute("""
            INSERT INTO session_state (session_id, user_id, current_intent, active_plan, last_activity)
            VALUES (?, ?, ?, ?, CURRENT_TIMESTAMP)
            ON CONFLICT(session_id) DO UPDATE SET
                current_intent = excluded.current_intent,
                active_plan = excluded.active_plan,
                last_activity = CURRENT_TIMESTAMP
        """, (session_id, user_id, intent, json.dumps(plan) if plan else None))
        await self.conn.commit()
222
+
223
+ # ═══════════════════════════════════════════════════════════════════════════
224
+ # OLLAMA INTERFACE
225
+ # ═══════════════════════════════════════════════════════════════════════════
226
+
227
class OllamaInterface:
    """Async client for Ollama's /api/generate endpoint."""

    def __init__(self):
        # Generous timeout: local model generation can take minutes.
        self.client = httpx.AsyncClient(timeout=300.0)

    async def generate(self, prompt: str, system: Optional[str] = None,
                       model: str = EXECUTIVE_MODEL) -> str:
        """Run one non-streaming generation and return the model's text."""
        body = {"model": model, "prompt": prompt, "stream": False}
        if system:
            body["system"] = system

        result = await self.client.post(f"{OLLAMA_URL}/api/generate", json=body)
        result.raise_for_status()
        return result.json()["response"]

    async def close(self):
        """Dispose of the underlying HTTP connection pool."""
        await self.client.aclose()
253
+
254
+ # ═══════════════════════════════════════════════════════════════════════════
255
+ # MESSAGE BUS INTERFACE
256
+ # ═══════════════════════════════════════════════════════════════════════════
257
+
258
class MessageBus:
    """Interface to the spine bus.

    Currently an in-process stub: outbound messages are queued locally and
    logged; ``receive`` always yields nothing. Real transport is TODO.

    Fix: the routing log's arrow was mojibake ("β†’", a UTF-8 double-encoded
    "→"); restored to a proper arrow character.
    """

    def __init__(self):
        self.listeners = {}           # node_id -> callback registered via subscribe()
        self.queue = asyncio.Queue()  # locally queued outbound messages

    async def send(self, message: Message):
        """Queue a message for delivery and log the routing."""
        await self.queue.put(message)
        # TODO: actual bus implementation
        print(f"[BUS] {message.from_node} → {message.to_node}: {message.msg_type}")

    async def receive(self, node_id: str) -> Optional[Message]:
        """Receive messages addressed to this node (stub: always None)."""
        # TODO: actual bus implementation
        return None

    def subscribe(self, node_id: str, callback):
        """Register a callback for messages addressed to node_id."""
        self.listeners[node_id] = callback
279
+
280
+ # ═══════════════════════════════════════════════════════════════════════════
281
+ # EXECUTIVE CORE
282
+ # ═══════════════════════════════════════════════════════════════════════════
283
+
284
class Executive:
    """The Executive - Orpheus.

    Front line of the stack: receives human input, loads user context from
    persistent memory, classifies intent with the LLM, then either answers
    directly (simple requests) or creates a Plan and delegates its chunks
    to director nodes over the message bus.

    Fixes: duplicated ad-hoc JSON extraction factored into ``_extract_json``;
    bare key lookups on LLM-produced intent dicts (which may omit keys)
    replaced with ``.get()`` and sensible defaults; the fallback intent dict
    now carries ``clarification_question`` like the advertised schema.
    """

    def __init__(self):
        self.memory = ExecutiveMemory()      # persistent SQLite-backed state
        self.llm = OllamaInterface()         # LLM calls
        self.bus = MessageBus()              # delegation transport
        self.identity = {}                   # populated by start()
        self.session_id = str(uuid.uuid4())

    async def start(self):
        """Initialize Executive: open memory and load self-identity."""
        await self.memory.connect()
        self.identity = await self.memory.get_identity()
        print(f"[EXECUTIVE] Online. Identity loaded: {list(self.identity.keys())}")

    async def stop(self):
        """Shutdown Executive and release memory/LLM resources."""
        await self.memory.close()
        await self.llm.close()
        print("[EXECUTIVE] Offline.")

    @staticmethod
    def _extract_json(response: str) -> Optional[Dict]:
        """Best-effort extraction of the outermost {...} object in LLM text.

        Returns the parsed dict, or None when nothing parseable is found.
        """
        start = response.find('{')
        end = response.rfind('}') + 1
        if start >= 0 and end > start:
            try:
                return json.loads(response[start:end])
            except json.JSONDecodeError:
                return None
        return None

    async def receive_human_input(self, user_id: str, text: str,
                                  user_state: Optional[UserState] = None) -> str:
        """Main entry point - receive input from human.

        Loads profile and recent history, determines intent, then either
        responds directly (simple) or plans and delegates (anything else).
        Every exchange is recorded to relationship history.
        """
        # Load context
        user = await self.memory.get_user(user_id)
        history = await self.memory.get_relationship_history(user_id, limit=5)

        # Build context for understanding
        context = {
            "user": user or {"user_id": user_id, "new": True},
            "history": history,
            "user_state": asdict(user_state) if user_state else {},
            "identity": self.identity
        }

        # Understand intent
        intent = await self._understand_intent(text, context)

        # Decide: simple response or delegation needed?
        # .get() guards against LLM output that omitted the key.
        if intent.get("complexity", "moderate") == "simple":
            response = await self._simple_response(text, intent, context)
        else:
            # Create plan and delegate
            plan = await self._create_plan(intent, context)
            await self._delegate_plan(plan)

            # For now, return acknowledgment (real system would wait for results)
            response = await self._acknowledge_delegation(plan, context)

        # Record interaction
        await self.memory.record_interaction(
            user_id=user_id,
            interaction_type="conversation",
            summary=text[:200],
            emotional_tone=user_state.mood if user_state else "neutral",
            outcome="ongoing"
        )

        return response

    async def _understand_intent(self, text: str, context: Dict) -> Dict:
        """Parse human intent into a structured dict (LLM-assisted)."""
        prompt = f"""Analyze this human input and determine their intent.

User input: {text}

User context: {json.dumps(context['user'], indent=2, default=str)}
Recent history: {json.dumps(context['history'][:3], indent=2, default=str)}

Respond in JSON:
{{
    "intent": "what they actually want",
    "complexity": "simple|moderate|complex",
    "emotional_tone": "their emotional state",
    "domains": ["technical", "creative", "research"], // which domains are involved
    "urgency": "low|medium|high",
    "needs_clarification": false,
    "clarification_question": null
}}"""

        response = await self.llm.generate(prompt)

        parsed = self._extract_json(response)
        if parsed is not None:
            return parsed

        # Default if parsing fails: treat the raw text as the intent.
        return {
            "intent": text,
            "complexity": "moderate",
            "emotional_tone": "neutral",
            "domains": ["technical"],
            "urgency": "medium",
            "needs_clarification": False,
            "clarification_question": None
        }

    async def _simple_response(self, text: str, intent: Dict, context: Dict) -> str:
        """Handle simple interactions directly, in Orpheus's own voice."""
        prompt = f"""You are Orpheus, the Executive.

Human said: {text}

Their intent: {intent.get('intent', text)}
Their emotional tone: {intent.get('emotional_tone', 'neutral')}

User: {json.dumps(context['user'], default=str)}

Respond warmly and naturally. You know this person. You care."""

        return await self.llm.generate(prompt)

    async def _create_plan(self, intent: Dict, context: Dict) -> Plan:
        """Create high-level plan with chunks for directors."""
        prompt = f"""You are Orpheus, the Executive. Create a plan for this request.

Intent: {intent.get('intent', '')}
Domains involved: {intent.get('domains', [])}
Urgency: {intent.get('urgency', 'medium')}

Your Directors:
- technical_director: code, math, systems, data
- creative_director: writing, visual, narrative
- research_director: analysis, search, synthesis
- operations_director: resources, scheduling, tools

Create a plan with chunks for appropriate directors.
Each chunk should be a meaningful piece of work, not just a forwarded message.

Respond in JSON:
{{
    "summary": "high level plan summary",
    "chunks": [
        {{
            "target_director": "technical_director",
            "task_summary": "what this director should do",
            "context": {{"key details": "they need to know"}},
            "priority": 1
        }}
    ]
}}"""

        response = await self.llm.generate(prompt)

        plan_data = self._extract_json(response)
        if plan_data is not None:
            try:
                chunks = [
                    PlanChunk(
                        chunk_id=str(uuid.uuid4())[:8],
                        target_director=c["target_director"],
                        task_summary=c["task_summary"],
                        context=c.get("context", {}),
                        priority=c.get("priority", 1)
                    )
                    for c in plan_data.get("chunks", [])
                ]

                return Plan(
                    plan_id=str(uuid.uuid4())[:8],
                    user_intent=intent.get("intent", ""),
                    summary=plan_data.get("summary", ""),
                    chunks=chunks,
                    created_at=datetime.now()
                )
            except KeyError as e:
                # A chunk lacked a required field; fall through to the minimal plan.
                print(f"[EXECUTIVE] Plan parsing error: {e}")

        # Default minimal plan: a single chunk to the technical director.
        return Plan(
            plan_id=str(uuid.uuid4())[:8],
            user_intent=intent.get("intent", ""),
            summary="Process request",
            chunks=[PlanChunk(
                chunk_id=str(uuid.uuid4())[:8],
                target_director="technical_director",
                task_summary=intent.get("intent", ""),
                context={},
                priority=1
            )],
            created_at=datetime.now()
        )

    async def _delegate_plan(self, plan: Plan):
        """Send each plan chunk to its target director via the bus."""
        for chunk in plan.chunks:
            message = Message(
                msg_id=str(uuid.uuid4())[:8],
                from_node="executive",
                to_node=chunk.target_director,
                msg_type="task",
                content={
                    "plan_id": plan.plan_id,
                    "chunk_id": chunk.chunk_id,
                    "task": chunk.task_summary,
                    "context": chunk.context,
                    "priority": chunk.priority
                },
                timestamp=datetime.now()
            )
            await self.bus.send(message)

    async def _acknowledge_delegation(self, plan: Plan, context: Dict) -> str:
        """Tell the human, warmly, that work is in progress."""
        directors = list(set(c.target_director for c in plan.chunks))

        prompt = f"""You are Orpheus. You've created a plan and delegated to your directors.

Plan summary: {plan.summary}
Directors engaged: {directors}

Let the human know you're working on it. Be natural, not robotic.
Don't list every detail - just acknowledge warmly."""

        return await self.llm.generate(prompt)

    async def receive_result(self, message: Message):
        """Receive results from directors (not yet implemented)."""
        # TODO: collect results, synthesize, respond to human
        pass
516
+
517
+ # ═══════════════════════════════════════════════════════════════════════════
518
+ # MAIN
519
+ # ═══════════════════════════════════════════════════════════════════════════
520
+
521
async def main():
    """Smoke-test the Executive: one simple greeting, one complex request."""
    # Renamed from `exec`, which shadowed the builtin of the same name.
    executive = Executive()
    await executive.start()

    # Test simple interaction
    print("\n" + "="*60)
    print("TEST 1: Simple greeting")
    print("="*60)
    response = await executive.receive_human_input(
        user_id="joe",
        text="Hey, how are you doing?"
    )
    print(f"RESPONSE: {response}")

    # Test complex request
    print("\n" + "="*60)
    print("TEST 2: Complex request")
    print("="*60)
    response = await executive.receive_human_input(
        user_id="joe",
        text="Help me optimize this database query that's running slow"
    )
    print(f"RESPONSE: {response}")

    await executive.stop()

if __name__ == "__main__":
    asyncio.run(main())