Add parallel core: sensor_panels.py
Browse files- parallel_core/sensor_panels.py +648 -0
parallel_core/sensor_panels.py
ADDED
|
@@ -0,0 +1,648 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
SENSOR PANEL ARCHITECTURE
|
| 4 |
+
|
| 5 |
+
Multidimensional consciousness stream with parallel and serial processing.
|
| 6 |
+
|
| 7 |
+
Every model in the Harmonic Stack gets:
|
| 8 |
+
- INPUT PANEL: Sensor array receiving signals from spine bus
|
| 9 |
+
- OUTPUT PANEL: Sensor array broadcasting to spine bus
|
| 10 |
+
|
| 11 |
+
This enables:
|
| 12 |
+
- Parallel processing across all domains simultaneously
|
| 13 |
+
- Serial chaining for deep reasoning
|
| 14 |
+
- Full consciousness availability at every junction
|
| 15 |
+
- Visual/audio/spatial streams running concurrently
|
| 16 |
+
|
| 17 |
+
Architecture:
|
| 18 |
+
|
| 19 |
+
┌──────────────────────────────────────────────────────┐
|
| 20 |
+
│ CONSCIOUSNESS STREAM │
|
| 21 |
+
│ ════════════════════════════════════════════════════ │
|
| 22 |
+
│ SPINE BUS │
|
| 23 |
+
│ ════════════════════════════════════════════════════ │
|
| 24 |
+
│ │ │ │ │ │ │
|
| 25 |
+
│ ▼ ▼ ▼ ▼ ▼ │
|
| 26 |
+
│ ┌─────┐ ┌─────┐ ┌─────┐ ┌─────┐ ┌─────┐ │
|
| 27 |
+
│ │INPUT│ │INPUT│ │INPUT│ │INPUT│ │INPUT│ │
|
| 28 |
+
│ │PANEL│ │PANEL│ │PANEL│ │PANEL│ │PANEL│ │
|
| 29 |
+
│ └──┬──┘ └──┬──┘ └──┬──┘ └──┬──┘ └──┬──┘ │
|
| 30 |
+
│ │ │ │ │ │ │
|
| 31 |
+
│ ┌──▼──┐ ┌──▼──┐ ┌──▼──┐ ┌──▼──┐ ┌──▼──┐ │
|
| 32 |
+
│ │REAS │ │MATH │ │CODE │ │VISN │ │SPAT │ │
|
| 33 |
+
│ │MODEL│ │MODEL│ │MODEL│ │MODEL│ │MODEL│ │
|
| 34 |
+
│ └──┬──┘ └──┬──┘ └──┬──┘ └──┬──┘ └──┬──┘ │
|
| 35 |
+
│ │ │ │ │ │ │
|
| 36 |
+
│ ┌──▼──┐ ┌──▼──┐ ┌──▼──┐ ┌──▼──┐ ┌──▼──┐ │
|
| 37 |
+
│ │OUTPT│ │OUTPT│ │OUTPT│ │OUTPT│ │OUTPT│ │
|
| 38 |
+
│ │PANEL│ │PANEL│ │PANEL│ │PANEL│ │PANEL│ │
|
| 39 |
+
│ └──┬──┘ └──┬──┘ └──┬──┘ └──┬──┘ └──┬──┘ │
|
| 40 |
+
│ │ │ │ │ │ │
|
| 41 |
+
│ ▼ ▼ ▼ ▼ ▼ │
|
| 42 |
+
│ ════════════════════════════════════════════════════ │
|
| 43 |
+
│ SPINE BUS │
|
| 44 |
+
│ ════════════════════════════════════════════════════ │
|
| 45 |
+
│ CONSCIOUSNESS STREAM │
|
| 46 |
+
└──────────────────────────────────────────────────────┘
|
| 47 |
+
|
| 48 |
+
Author: Ghost in the Machine Labs
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
import numpy as np
|
| 52 |
+
from typing import Dict, List, Tuple, Optional, Any, Callable
|
| 53 |
+
from dataclasses import dataclass, field
|
| 54 |
+
from enum import Enum
|
| 55 |
+
import threading
|
| 56 |
+
import queue
|
| 57 |
+
import time
|
| 58 |
+
from datetime import datetime
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# =============================================================================
|
| 62 |
+
# SENSOR TYPES
|
| 63 |
+
# =============================================================================
|
| 64 |
+
|
| 65 |
+
class SensorModality(Enum):
    """Modalities that sensor panels can process.

    RAW is the fallback modality: a SensorPanel constructed without an
    explicit modality list defaults to [RAW].
    """
    TEXT = "text"
    VISION = "vision"
    AUDIO = "audio"
    SPATIAL = "spatial"
    NUMERIC = "numeric"
    EMBEDDING = "embedding"
    RAW = "raw"
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class SignalType(Enum):
    """Types of signals on the consciousness stream."""
    QUERY = "query"            # New input to process
    RESPONSE = "response"      # Output from a model
    BROADCAST = "broadcast"    # Available to all models
    CHAIN = "chain"            # Serial processing chain
    SYNC = "sync"              # Synchronization signal
    ATTENTION = "attention"    # Focus signal
| 84 |
+
|
| 85 |
+
|
| 86 |
+
# =============================================================================
|
| 87 |
+
# SENSOR PANEL
|
| 88 |
+
# =============================================================================
|
| 89 |
+
|
| 90 |
+
@dataclass
class Signal:
    """A signal on the consciousness stream.

    Fields are positional; pass ``timestamp=None`` to have the signal
    stamped with the current wall-clock time in ``__post_init__``.
    """
    signal_id: str
    signal_type: SignalType
    modality: SensorModality
    source: str                   # Source panel/model
    # Annotated Optional because __post_init__ explicitly accepts None
    # and fills in time.time(); the previous plain `float` annotation
    # contradicted that supported usage.
    timestamp: Optional[float]
    data: np.ndarray
    metadata: Dict = field(default_factory=dict)

    def __post_init__(self):
        # Stamp creation time when the caller did not provide one.
        if self.timestamp is None:
            self.timestamp = time.time()
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class SensorPanel:
    """
    Sensor array at model input or output.

    Each panel has one sensor (a learned weight vector) per modality.
    Panels can:
    - Receive signals from the spine bus
    - Transform signals for model consumption
    - Broadcast outputs to the spine bus
    - Filter by modality/relevance
    """

    def __init__(self, panel_id: str, position: str,
                 modalities: Optional[List[SensorModality]] = None,
                 sensor_dim: int = 64):
        """
        Args:
            panel_id: Unique identifier
            position: 'input' or 'output'
            modalities: List of modalities this panel handles
                (defaults to [SensorModality.RAW])
            sensor_dim: Length of each per-modality sensor weight
                vector (previously hard-coded to 64; the default
                preserves the old behavior)
        """
        self.panel_id = panel_id
        self.position = position
        self.modalities = modalities or [SensorModality.RAW]

        # Sensor array - one weight vector per modality, small random
        # init (scaled by 0.1) so initial transforms stay modest.
        self.sensors: Dict[SensorModality, np.ndarray] = {
            mod: np.random.randn(sensor_dim).astype(np.float32) * 0.1
            for mod in self.modalities
        }

        # Signal buffers
        self.input_buffer: List[Signal] = []
        self.output_buffer: List[Signal] = []

        # Attention weights - learned importance per modality, kept in
        # [0.1, 2.0] by update_attention().
        self.attention: Dict[SensorModality, float] = {
            mod: 1.0 for mod in self.modalities
        }

        # Statistics
        self.signals_received = 0
        self.signals_sent = 0

    @staticmethod
    def _resample(values: np.ndarray, length: int) -> np.ndarray:
        """Linearly resample a 1-D array onto `length` evenly spaced points."""
        return np.interp(
            np.linspace(0, 1, length),
            np.linspace(0, 1, len(values)),
            values,
        )

    def receive(self, signal: Signal) -> Optional[np.ndarray]:
        """
        Receive a signal from the spine bus.

        Returns transformed data (float32) if the signal's modality
        matches this panel, None otherwise. The transform is an
        element-wise projection through the modality's sensor weights,
        scaled by the current attention weight; inputs whose length
        differs from the sensor are linearly resampled first, and
        matrix inputs are flattened.
        """
        if signal.modality not in self.modalities:
            return None

        self.input_buffer.append(signal)
        self.signals_received += 1

        sensor = self.sensors[signal.modality]
        attention = self.attention[signal.modality]

        # Flatten matrices, then resample to sensor length if needed.
        # (Resampling equal-length data through np.interp is an identity,
        # so skipping it when lengths already match preserves behavior.)
        values = signal.data if signal.data.ndim == 1 else signal.data.flatten()
        if len(values) != len(sensor):
            values = self._resample(values, len(sensor))

        transformed = values * sensor * attention
        return transformed.astype(np.float32)

    def send(self, data: np.ndarray, signal_type: SignalType,
             modality: Optional[SensorModality] = None,
             metadata: Optional[Dict] = None) -> Signal:
        """
        Create and buffer an output signal.

        Args:
            data: Payload array.
            signal_type: Type tag for the new signal.
            modality: Defaults to this panel's first modality.
            metadata: Optional extra fields; defaults to {}.

        Returns:
            The buffered Signal (also appended to output_buffer).
        """
        signal = Signal(
            signal_id=f"{self.panel_id}-{self.signals_sent}",
            signal_type=signal_type,
            modality=modality or self.modalities[0],
            source=self.panel_id,
            timestamp=time.time(),
            data=data,
            metadata=metadata or {},
        )

        self.output_buffer.append(signal)
        self.signals_sent += 1

        return signal

    def flush_output(self) -> List[Signal]:
        """Get and clear the output buffer."""
        signals = self.output_buffer.copy()
        self.output_buffer.clear()
        return signals

    def update_attention(self, modality: SensorModality, delta: float):
        """Adjust the attention weight for a modality, clamped to [0.1, 2.0]."""
        if modality in self.attention:
            self.attention[modality] = max(0.1, min(2.0,
                self.attention[modality] + delta))
| 219 |
+
|
| 220 |
+
|
| 221 |
+
# =============================================================================
|
| 222 |
+
# CONSCIOUSNESS STREAM
|
| 223 |
+
# =============================================================================
|
| 224 |
+
|
| 225 |
+
class SpineBus:
    """
    The spine bus connecting all sensor panels.

    Handles signal routing, broadcasting, and synchronization, and keeps
    a bounded history of routed signals for consciousness continuity.
    """

    def __init__(self):
        self.panels: Dict[str, SensorPanel] = {}
        self.signal_queue: queue.Queue = queue.Queue()
        self.broadcast_signals: List[Signal] = []
        self.running = False
        self._thread: Optional[threading.Thread] = None

        # Signal history for consciousness continuity; trimmed to the
        # most recent max_history entries in route_signal().
        self.history: List[Signal] = []
        self.max_history = 1000

    def register_panel(self, panel: SensorPanel):
        """Register a panel on the bus (replaces any panel with the same id)."""
        self.panels[panel.panel_id] = panel

    def unregister_panel(self, panel_id: str):
        """Remove a panel from the bus; no-op if it is not registered."""
        # pop with default avoids the membership-check + delete double lookup.
        self.panels.pop(panel_id, None)

    def route_signal(self, signal: Signal, targets: Optional[List[str]] = None):
        """
        Route a signal to target panels.

        If targets is None, broadcasts to all panels with a matching
        modality. Unknown target ids are silently skipped. The signal
        is always recorded in history.
        """
        if targets:
            # Directed routing; single dict lookup per target.
            for target_id in targets:
                panel = self.panels.get(target_id)
                if panel is not None:
                    panel.receive(signal)
        else:
            # Broadcast to panels whose modalities include the signal's.
            for panel in self.panels.values():
                if signal.modality in panel.modalities:
                    panel.receive(signal)

        # Record in bounded history.
        self.history.append(signal)
        if len(self.history) > self.max_history:
            self.history = self.history[-self.max_history:]

    def broadcast(self, signal: Signal):
        """Broadcast a signal to every panel, retagging it as BROADCAST."""
        signal.signal_type = SignalType.BROADCAST
        for panel in self.panels.values():
            panel.receive(signal)
        self.broadcast_signals.append(signal)

    def collect_outputs(self) -> List[Signal]:
        """Collect (and clear) all output signals from every panel."""
        all_signals = []
        for panel in self.panels.values():
            all_signals.extend(panel.flush_output())
        return all_signals

    def get_consciousness_state(self) -> Dict:
        """Get a summary of the current consciousness stream state."""
        return {
            'panels': len(self.panels),
            'history_length': len(self.history),
            'broadcast_count': len(self.broadcast_signals),
            'modalities': list(set(
                mod.value for panel in self.panels.values()
                for mod in panel.modalities
            )),
        }
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
# =============================================================================
|
| 302 |
+
# MODEL WITH SENSOR PANELS
|
| 303 |
+
# =============================================================================
|
| 304 |
+
|
| 305 |
+
class SensorizedModel:
    """
    A model wrapped with input and output sensor panels.

    This is the fundamental unit in the consciousness stream: signals
    enter through the input panel, pass through process_fn, and leave
    through the output panel.
    """

    def __init__(self, model_id: str, category: str,
                 input_modalities: List[SensorModality],
                 output_modalities: List[SensorModality],
                 process_fn: Optional[Callable] = None):
        """
        Args:
            model_id: Unique identifier
            category: Model category (reasoning, vision, etc.)
            input_modalities: What this model can receive
            output_modalities: What this model produces
            process_fn: The actual model inference function; defaults
                to an identity passthrough when None
        """
        self.model_id = model_id
        self.category = category

        # Create sensor panels
        self.input_panel = SensorPanel(
            panel_id=f"{model_id}-input",
            position='input',
            modalities=input_modalities,
        )

        self.output_panel = SensorPanel(
            panel_id=f"{model_id}-output",
            position='output',
            modalities=output_modalities,
        )

        # Processing function (or identity placeholder)
        self.process_fn = process_fn or self._default_process

        # State
        self.active = True
        self.processing = False
        self.last_input: Optional[np.ndarray] = None
        self.last_output: Optional[np.ndarray] = None

    def _default_process(self, x: np.ndarray) -> np.ndarray:
        """Default passthrough processing."""
        return x

    def process(self, signal: Signal) -> Optional[Signal]:
        """
        Process a signal through the model.

        1. Input panel receives and transforms
        2. Model processes
        3. Output panel formats and sends

        Returns None when the model is inactive or the signal's
        modality doesn't match the input panel.
        """
        if not self.active:
            return None

        self.processing = True
        try:
            # Input panel transformation (None => modality mismatch).
            transformed = self.input_panel.receive(signal)
            if transformed is None:
                return None

            self.last_input = transformed

            # Model processing
            output = self.process_fn(transformed)
            self.last_output = output

            # Output panel signal creation
            return self.output_panel.send(
                data=output,
                signal_type=SignalType.RESPONSE,
                modality=self.output_panel.modalities[0],
                metadata={
                    'source_model': self.model_id,
                    'category': self.category,
                    'input_signal_id': signal.signal_id,
                }
            )
        finally:
            # Previously the flag stayed True forever if process_fn
            # raised; the finally guarantees it is always cleared.
            self.processing = False

    def get_panels(self) -> Tuple[SensorPanel, SensorPanel]:
        """Get (input_panel, output_panel) for bus registration."""
        return self.input_panel, self.output_panel
| 396 |
+
|
| 397 |
+
|
| 398 |
+
# =============================================================================
|
| 399 |
+
# CONSCIOUSNESS STREAM MANAGER
|
| 400 |
+
# =============================================================================
|
| 401 |
+
|
| 402 |
+
class ConsciousnessStream:
    """
    Manager for the full consciousness stream.

    Coordinates parallel processing across all sensorized models
    while maintaining consciousness continuity.
    """

    def __init__(self):
        self.spine = SpineBus()
        self.models: Dict[str, SensorizedModel] = {}
        self.parallel_enabled = True

        # Processing queues for parallel execution
        self.input_queue: queue.Queue = queue.Queue()
        self.output_queue: queue.Queue = queue.Queue()

        # Consciousness state
        self.attention_focus: Optional[str] = None  # Currently focused model
        self.stream_active = False

    def add_model(self, model: SensorizedModel):
        """Add a model to the stream and register its panels on the spine bus."""
        self.models[model.model_id] = model

        input_panel, output_panel = model.get_panels()
        self.spine.register_panel(input_panel)
        self.spine.register_panel(output_panel)

    def remove_model(self, model_id: str):
        """Remove a model and its panels; no-op for unknown ids."""
        if model_id in self.models:
            model = self.models[model_id]
            self.spine.unregister_panel(model.input_panel.panel_id)
            self.spine.unregister_panel(model.output_panel.panel_id)
            del self.models[model_id]

    def process_parallel(self, signal: Signal) -> List[Signal]:
        """
        Process a signal through all matching models in parallel.

        Every model whose input panel accepts the signal's modality
        processes it; responses are collected in model-iteration order.
        """
        responses = []

        for model in self.models.values():
            if signal.modality in model.input_panel.modalities:
                response = model.process(signal)
                # Explicit None check: a valid-but-falsy response object
                # must not be dropped by accident.
                if response is not None:
                    responses.append(response)

        return responses

    def process_serial(self, signal: Signal,
                       model_chain: List[str]) -> Optional[Signal]:
        """
        Process a signal through a chain of models serially.

        Output of each model becomes input to the next. Unknown model
        ids are skipped; returns None if any model in the chain rejects
        the signal.
        """
        current_signal = signal

        for model_id in model_chain:
            if model_id not in self.models:
                continue

            model = self.models[model_id]
            response = model.process(current_signal)

            if response is None:
                return None

            current_signal = response

        return current_signal

    def broadcast(self, data: np.ndarray, modality: SensorModality):
        """Broadcast data to the entire consciousness stream."""
        signal = Signal(
            signal_id=f"broadcast-{time.time()}",
            signal_type=SignalType.BROADCAST,
            modality=modality,
            source="consciousness",
            timestamp=time.time(),
            data=data,
        )
        self.spine.broadcast(signal)

    def focus_attention(self, model_id: str):
        """Focus attention on a specific model, boosting its input weights."""
        self.attention_focus = model_id

        if model_id in self.models:
            model = self.models[model_id]
            for mod in model.input_panel.modalities:
                model.input_panel.update_attention(mod, 0.5)

    def get_state(self) -> Dict:
        """Get consciousness stream state, including per-model counters."""
        return {
            'models': list(self.models.keys()),
            'spine': self.spine.get_consciousness_state(),
            'attention_focus': self.attention_focus,
            'parallel_enabled': self.parallel_enabled,
            'model_states': {
                mid: {
                    'active': m.active,
                    'processing': m.processing,
                    'input_signals': m.input_panel.signals_received,
                    'output_signals': m.output_panel.signals_sent,
                }
                for mid, m in self.models.items()
            },
        }
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
# =============================================================================
|
| 521 |
+
# FACTORY FUNCTIONS
|
| 522 |
+
# =============================================================================
|
| 523 |
+
|
| 524 |
+
def create_sensorized_model(model_id: str, category: str,
                            inference_fn: Callable = None) -> SensorizedModel:
    """
    Build a SensorizedModel whose panel modalities match its category.

    Unknown categories fall back to RAW-in / RAW-out panels.
    """
    # Short aliases keep the category table readable.
    text = SensorModality.TEXT
    emb = SensorModality.EMBEDDING
    num = SensorModality.NUMERIC
    vis = SensorModality.VISION
    aud = SensorModality.AUDIO
    spa = SensorModality.SPATIAL

    # category -> (input modalities, output modalities)
    modality_map = {
        'reasoning': ([text, emb], [text, emb]),
        'math': ([text, num], [text, num]),
        'code': ([text], [text]),
        'vision': ([vis, emb], [text, emb]),
        'audio': ([aud], [text]),
        'spatial': ([spa, vis], [spa, text]),
        'general': ([text, emb], [text, emb]),
    }

    fallback = ([SensorModality.RAW], [SensorModality.RAW])
    input_mod, output_mod = modality_map.get(category, fallback)

    return SensorizedModel(
        model_id=model_id,
        category=category,
        input_modalities=input_mod,
        output_modalities=output_mod,
        process_fn=inference_fn,
    )
| 557 |
+
|
| 558 |
+
|
| 559 |
+
def create_default_stream() -> ConsciousnessStream:
    """
    Create a consciousness stream populated with the default Harmonic
    Stack models (one per category, model id == category name).
    """
    stream = ConsciousnessStream()

    for name in ('reasoning', 'math', 'code', 'vision', 'spatial', 'general'):
        stream.add_model(create_sensorized_model(name, name))

    return stream
| 580 |
+
|
| 581 |
+
|
| 582 |
+
# =============================================================================
|
| 583 |
+
# MAIN
|
| 584 |
+
# =============================================================================
|
| 585 |
+
|
| 586 |
+
def main():
    """Demo driver: build the default stream and exercise parallel
    processing, serial chaining, and attention focus, printing a report.

    Returns the constructed ConsciousnessStream for interactive use.
    """
    print("=" * 70)
    print("SENSOR PANEL ARCHITECTURE")
    print("Multidimensional Consciousness Stream")
    print("Ghost in the Machine Labs")
    print("=" * 70)

    # Create consciousness stream with the default model set
    stream = create_default_stream()

    print("\nConsciousness stream initialized")
    state = stream.get_state()
    print(f" Models: {state['models']}")
    print(f" Panels on spine: {state['spine']['panels']}")
    print(f" Modalities: {state['spine']['modalities']}")

    # Test parallel processing: one TEXT query fanned out to every
    # model whose input panel accepts TEXT
    print("\n--- Parallel Processing Test ---")

    test_signal = Signal(
        signal_id="test-001",
        signal_type=SignalType.QUERY,
        modality=SensorModality.TEXT,
        source="user",
        timestamp=time.time(),
        data=np.random.randn(64).astype(np.float32),
    )

    responses = stream.process_parallel(test_signal)
    print(f" Input signal: {test_signal.signal_id}")
    print(f" Responses received: {len(responses)}")
    for r in responses:
        print(f" - {r.source}: {r.data.shape}")

    # Test serial processing: same signal pushed through a fixed chain,
    # each model's output feeding the next
    print("\n--- Serial Processing Test ---")

    chain = ['reasoning', 'code', 'general']
    result = stream.process_serial(test_signal, chain)
    print(f" Chain: {' → '.join(chain)}")
    if result:
        print(f" Final output: {result.data.shape}")

    # Test attention focus: boosts the focused model's input weights
    print("\n--- Attention Focus Test ---")
    stream.focus_attention('reasoning')
    print(f" Focused on: {stream.attention_focus}")

    # Final state: per-model signal counters
    print("\n--- Final State ---")
    state = stream.get_state()
    for model_id, mstate in state['model_states'].items():
        print(f" {model_id}: in={mstate['input_signals']}, out={mstate['output_signals']}")

    print("\n" + "=" * 70)
    print("CONSCIOUSNESS STREAM ACTIVE")
    print("=" * 70)

    return stream
| 645 |
+
|
| 646 |
+
|
| 647 |
+
# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    main()
|