|
|
|
|
|
""" |
|
|
SENSOR PANEL ARCHITECTURE |
|
|
|
|
|
Multidimensional consciousness stream with parallel and serial processing. |
|
|
|
|
|
Every model in the Harmonic Stack gets: |
|
|
- INPUT PANEL: sensor array receiving signals from the spine bus


- OUTPUT PANEL: sensor array broadcasting results to the spine bus
|
|
|
|
|
This enables (see the usage sketch after this list):
|
|
- Parallel processing across all domains simultaneously |
|
|
- Serial chaining for deep reasoning |
|
|
- Full consciousness availability at every junction |
|
|
- Visual/audio/spatial streams running concurrently |
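
Usage sketch (illustrative; every name below is defined in this module):

    stream = create_default_stream()
    sig = Signal(
        signal_id="q-001",
        signal_type=SignalType.QUERY,
        modality=SensorModality.TEXT,
        source="user",
        timestamp=time.time(),
        data=np.zeros(64, dtype=np.float32),
    )
    fan_out = stream.process_parallel(sig)                        # parallel fan-out
    chained = stream.process_serial(sig, ['reasoning', 'code'])   # serial chain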
|
|
|
|
|
Architecture: |
|
|
|
|
|
┌────────────────────────────────────────────────────────┐
│                  CONSCIOUSNESS STREAM                   │
│  ┌──────────────────────────────────────────────────┐  │
│  │                    SPINE BUS                     │  │
│  └──────────────────────────────────────────────────┘  │
│     │        │        │        │        │              │
│     ▼        ▼        ▼        ▼        ▼              │
│  ┌─────┐  ┌─────┐  ┌─────┐  ┌─────┐  ┌─────┐           │
│  │INPUT│  │INPUT│  │INPUT│  │INPUT│  │INPUT│           │
│  │PANEL│  │PANEL│  │PANEL│  │PANEL│  │PANEL│           │
│  └──┬──┘  └──┬──┘  └──┬──┘  └──┬──┘  └──┬──┘           │
│     │        │        │        │        │              │
│  ┌──▼──┐  ┌──▼──┐  ┌──▼──┐  ┌──▼──┐  ┌──▼──┐           │
│  │REAS │  │MATH │  │CODE │  │VISN │  │SPAT │           │
│  │MODEL│  │MODEL│  │MODEL│  │MODEL│  │MODEL│           │
│  └──┬──┘  └──┬──┘  └──┬──┘  └──┬──┘  └──┬──┘           │
│     │        │        │        │        │              │
│  ┌──▼──┐  ┌──▼──┐  ┌──▼──┐  ┌──▼──┐  ┌──▼──┐           │
│  │OUTPT│  │OUTPT│  │OUTPT│  │OUTPT│  │OUTPT│           │
│  │PANEL│  │PANEL│  │PANEL│  │PANEL│  │PANEL│           │
│  └──┬──┘  └──┬──┘  └──┬──┘  └──┬──┘  └──┬──┘           │
│     │        │        │        │        │              │
│     ▼        ▼        ▼        ▼        ▼              │
│  ┌──────────────────────────────────────────────────┐  │
│  │                    SPINE BUS                     │  │
│  └──────────────────────────────────────────────────┘  │
│                  CONSCIOUSNESS STREAM                   │
└────────────────────────────────────────────────────────┘
|
|
|
|
|
Author: Ghost in the Machine Labs |
|
|
""" |
|
|
|
|
|
import numpy as np |
|
|
from typing import Dict, List, Tuple, Optional, Callable
|
|
from dataclasses import dataclass, field |
|
|
from enum import Enum |
|
|
import threading |
|
|
import queue |
|
|
import time |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class SensorModality(Enum): |
|
|
"""Modalities that sensor panels can process.""" |
|
|
TEXT = "text" |
|
|
VISION = "vision" |
|
|
AUDIO = "audio" |
|
|
SPATIAL = "spatial" |
|
|
NUMERIC = "numeric" |
|
|
EMBEDDING = "embedding" |
|
|
RAW = "raw" |
|
|
|
|
|
|
|
|
class SignalType(Enum): |
|
|
"""Types of signals on the consciousness stream.""" |
|
|
QUERY = "query" |
|
|
RESPONSE = "response" |
|
|
BROADCAST = "broadcast" |
|
|
CHAIN = "chain" |
|
|
SYNC = "sync" |
|
|
ATTENTION = "attention" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@dataclass |
|
|
class Signal: |
|
|
"""A signal on the consciousness stream.""" |
|
|
signal_id: str |
|
|
signal_type: SignalType |
|
|
modality: SensorModality |
|
|
source: str |
|
|
timestamp: float |
|
|
data: np.ndarray |
|
|
metadata: Dict = field(default_factory=dict) |
|
|
|
|
|
def __post_init__(self): |
|
|
if self.timestamp is None: |
|
|
self.timestamp = time.time() |
|
|
|
|
|
|
|
|
class SensorPanel: |
|
|
""" |
|
|
Sensor array at model input or output. |
|
|
|
|
|
Each panel has multiple sensors tuned to different modalities. |
|
|
Panels can: |
|
|
- Receive signals from spine bus |
|
|
- Transform signals for model consumption |
|
|
- Broadcast outputs to spine bus |
|
|
- Filter by modality/relevance |
|
|
""" |
|
|
|
|
|
    def __init__(self, panel_id: str, position: str,


                 modalities: Optional[List[SensorModality]] = None):
|
|
""" |
|
|
Args: |
|
|
panel_id: Unique identifier |
|
|
position: 'input' or 'output' |
|
|
modalities: List of modalities this panel handles |
|
|
""" |
|
|
self.panel_id = panel_id |
|
|
self.position = position |
|
|
self.modalities = modalities or [SensorModality.RAW] |
|
|
|
|
|
|
|
|
        # One sensor per supported modality: a small random 64-dim weight vector.
        self.sensors: Dict[SensorModality, np.ndarray] = {}
|
|
for mod in self.modalities: |
|
|
|
|
|
self.sensors[mod] = np.random.randn(64).astype(np.float32) * 0.1 |
|
|
|
|
|
|
|
|
self.input_buffer: List[Signal] = [] |
|
|
self.output_buffer: List[Signal] = [] |
|
|
|
|
|
|
|
|
        # Per-modality attention weights; update_attention clamps them to [0.1, 2.0].
        self.attention: Dict[SensorModality, float] = {
|
|
mod: 1.0 for mod in self.modalities |
|
|
} |
|
|
|
|
|
|
|
|
self.signals_received = 0 |
|
|
self.signals_sent = 0 |
|
|
|
|
|
def receive(self, signal: Signal) -> Optional[np.ndarray]: |
|
|
""" |
|
|
Receive a signal from spine bus. |
|
|
|
|
|
Returns transformed data if modality matches, None otherwise. |
|
|
""" |
|
|
if signal.modality not in self.modalities: |
|
|
return None |
|
|
|
|
|
self.input_buffer.append(signal) |
|
|
self.signals_received += 1 |
|
|
|
|
|
|
|
|
sensor = self.sensors[signal.modality] |
|
|
attention = self.attention[signal.modality] |
|
|
|
|
|
|
|
|
if len(signal.data.shape) == 1: |
|
|
|
|
|
if len(signal.data) == len(sensor): |
|
|
transformed = signal.data * sensor * attention |
|
|
else: |
|
|
|
|
|
transformed = np.interp( |
|
|
np.linspace(0, 1, len(sensor)), |
|
|
np.linspace(0, 1, len(signal.data)), |
|
|
signal.data |
|
|
) * sensor * attention |
|
|
else: |
|
|
|
|
|
flat = signal.data.flatten() |
|
|
transformed = np.interp( |
|
|
np.linspace(0, 1, len(sensor)), |
|
|
np.linspace(0, 1, len(flat)), |
|
|
flat |
|
|
) * sensor * attention |
|
|
|
|
|
return transformed.astype(np.float32) |
|
|
|
|
|
    def send(self, data: np.ndarray, signal_type: SignalType,


             modality: Optional[SensorModality] = None,
             metadata: Optional[Dict] = None) -> Signal:
|
|
""" |
|
|
Create and buffer an output signal. |
|
|
""" |
|
|
signal = Signal( |
|
|
signal_id=f"{self.panel_id}-{self.signals_sent}", |
|
|
signal_type=signal_type, |
|
|
modality=modality or self.modalities[0], |
|
|
source=self.panel_id, |
|
|
timestamp=time.time(), |
|
|
data=data, |
|
|
metadata=metadata or {}, |
|
|
) |
|
|
|
|
|
self.output_buffer.append(signal) |
|
|
self.signals_sent += 1 |
|
|
|
|
|
return signal |
|
|
|
|
|
def flush_output(self) -> List[Signal]: |
|
|
"""Get and clear output buffer.""" |
|
|
signals = self.output_buffer.copy() |
|
|
self.output_buffer.clear() |
|
|
return signals |
|
|
|
|
|
def update_attention(self, modality: SensorModality, delta: float): |
|
|
"""Update attention weight for a modality.""" |
|
|
if modality in self.attention: |
|
|
self.attention[modality] = max(0.1, min(2.0, |
|
|
self.attention[modality] + delta)) |
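

# Illustrative sketch (not part of the architecture itself): how a single
# SensorPanel turns an incoming Signal into a model-ready vector. The 64-dim
# data length matches the default sensor width initialised above.
def _demo_panel_roundtrip() -> Optional[np.ndarray]:
    """Feed one TEXT signal into an input panel and return the transformed view."""
    panel = SensorPanel("demo-input", position="input",
                        modalities=[SensorModality.TEXT])
    sig = Signal(
        signal_id="demo-0",
        signal_type=SignalType.QUERY,
        modality=SensorModality.TEXT,
        source="demo",
        timestamp=time.time(),
        data=np.ones(64, dtype=np.float32),
    )
    # Elementwise: data * sensor weights * attention (modality matches, so not None).
    return panel.receive(sig)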
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class SpineBus: |
|
|
""" |
|
|
The spine bus connecting all sensor panels. |
|
|
|
|
|
Handles signal routing, broadcasting, and synchronization. |
|
|
""" |
|
|
|
|
|
def __init__(self): |
|
|
self.panels: Dict[str, SensorPanel] = {} |
|
|
self.signal_queue: queue.Queue = queue.Queue() |
|
|
self.broadcast_signals: List[Signal] = [] |
|
|
self.running = False |
|
|
self._thread: Optional[threading.Thread] = None |
|
|
|
|
|
|
|
|
self.history: List[Signal] = [] |
|
|
self.max_history = 1000 |
|
|
|
|
|
def register_panel(self, panel: SensorPanel): |
|
|
"""Register a panel on the bus.""" |
|
|
self.panels[panel.panel_id] = panel |
|
|
|
|
|
def unregister_panel(self, panel_id: str): |
|
|
"""Remove a panel from the bus.""" |
|
|
if panel_id in self.panels: |
|
|
del self.panels[panel_id] |
|
|
|
|
|
    def route_signal(self, signal: Signal, targets: Optional[List[str]] = None):
|
|
""" |
|
|
Route a signal to target panels. |
|
|
|
|
|
If targets is None, broadcasts to all panels with matching modality. |
|
|
""" |
|
|
if targets: |
|
|
|
|
|
for target_id in targets: |
|
|
if target_id in self.panels: |
|
|
self.panels[target_id].receive(signal) |
|
|
else: |
|
|
|
|
|
for panel in self.panels.values(): |
|
|
if signal.modality in panel.modalities: |
|
|
panel.receive(signal) |
|
|
|
|
|
|
|
|
self.history.append(signal) |
|
|
if len(self.history) > self.max_history: |
|
|
self.history = self.history[-self.max_history:] |
|
|
|
|
|
def broadcast(self, signal: Signal): |
|
|
"""Broadcast signal to all panels.""" |
|
|
signal.signal_type = SignalType.BROADCAST |
|
|
for panel in self.panels.values(): |
|
|
panel.receive(signal) |
|
|
self.broadcast_signals.append(signal) |
|
|
|
|
|
def collect_outputs(self) -> List[Signal]: |
|
|
"""Collect all output signals from panels.""" |
|
|
all_signals = [] |
|
|
for panel in self.panels.values(): |
|
|
all_signals.extend(panel.flush_output()) |
|
|
return all_signals |
|
|
|
|
|
def get_consciousness_state(self) -> Dict: |
|
|
"""Get current consciousness stream state.""" |
|
|
return { |
|
|
'panels': len(self.panels), |
|
|
'history_length': len(self.history), |
|
|
'broadcast_count': len(self.broadcast_signals), |
|
|
'modalities': list(set( |
|
|
mod.value for panel in self.panels.values() |
|
|
for mod in panel.modalities |
|
|
)), |
|
|
} |
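

# Illustrative sketch: registering panels on a SpineBus and routing one signal
# by modality. With targets=None the bus delivers only to panels whose
# modalities include the signal's modality (here, the TEXT panel).
def _demo_bus_routing() -> Dict:
    bus = SpineBus()
    bus.register_panel(SensorPanel("a-input", "input", [SensorModality.TEXT]))
    bus.register_panel(SensorPanel("b-input", "input", [SensorModality.VISION]))
    sig = Signal(
        signal_id="route-0",
        signal_type=SignalType.QUERY,
        modality=SensorModality.TEXT,
        source="demo",
        timestamp=time.time(),
        data=np.zeros(64, dtype=np.float32),
    )
    bus.route_signal(sig)  # no explicit targets -> modality-matched broadcast
    return bus.get_consciousness_state()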
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class SensorizedModel: |
|
|
""" |
|
|
A model wrapped with input and output sensor panels. |
|
|
|
|
|
This is the fundamental unit in the consciousness stream. |
|
|
""" |
|
|
|
|
|
def __init__(self, model_id: str, category: str, |
|
|
input_modalities: List[SensorModality], |
|
|
output_modalities: List[SensorModality], |
|
|
                 process_fn: Optional[Callable] = None):
|
|
""" |
|
|
Args: |
|
|
model_id: Unique identifier |
|
|
category: Model category (reasoning, vision, etc.) |
|
|
input_modalities: What this model can receive |
|
|
output_modalities: What this model produces |
|
|
process_fn: The actual model inference function |
|
|
""" |
|
|
self.model_id = model_id |
|
|
self.category = category |
|
|
|
|
|
|
|
|
self.input_panel = SensorPanel( |
|
|
panel_id=f"{model_id}-input", |
|
|
position='input', |
|
|
modalities=input_modalities, |
|
|
) |
|
|
|
|
|
self.output_panel = SensorPanel( |
|
|
panel_id=f"{model_id}-output", |
|
|
position='output', |
|
|
modalities=output_modalities, |
|
|
) |
|
|
|
|
|
|
|
|
self.process_fn = process_fn or self._default_process |
|
|
|
|
|
|
|
|
self.active = True |
|
|
self.processing = False |
|
|
self.last_input: Optional[np.ndarray] = None |
|
|
self.last_output: Optional[np.ndarray] = None |
|
|
|
|
|
def _default_process(self, x: np.ndarray) -> np.ndarray: |
|
|
"""Default passthrough processing.""" |
|
|
return x |
|
|
|
|
|
def process(self, signal: Signal) -> Optional[Signal]: |
|
|
""" |
|
|
Process a signal through the model. |
|
|
|
|
|
1. Input panel receives and transforms |
|
|
2. Model processes |
|
|
3. Output panel formats and sends |
|
|
""" |
|
|
if not self.active: |
|
|
return None |
|
|
|
|
|
self.processing = True |
|
|
|
|
|
|
|
|
transformed = self.input_panel.receive(signal) |
|
|
if transformed is None: |
|
|
self.processing = False |
|
|
return None |
|
|
|
|
|
self.last_input = transformed |
|
|
|
|
|
|
|
|
output = self.process_fn(transformed) |
|
|
self.last_output = output |
|
|
|
|
|
|
|
|
out_signal = self.output_panel.send( |
|
|
data=output, |
|
|
signal_type=SignalType.RESPONSE, |
|
|
modality=self.output_panel.modalities[0], |
|
|
metadata={ |
|
|
'source_model': self.model_id, |
|
|
'category': self.category, |
|
|
'input_signal_id': signal.signal_id, |
|
|
} |
|
|
) |
|
|
|
|
|
self.processing = False |
|
|
return out_signal |
|
|
|
|
|
def get_panels(self) -> Tuple[SensorPanel, SensorPanel]: |
|
|
"""Get both panels for bus registration.""" |
|
|
return self.input_panel, self.output_panel |
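

# Illustrative sketch: wrapping a plain numpy function as a SensorizedModel.
# The doubling lambda is an arbitrary stand-in for real model inference.
def _demo_sensorized_model() -> Optional[Signal]:
    model = SensorizedModel(
        model_id="demo-echo",
        category="general",
        input_modalities=[SensorModality.TEXT],
        output_modalities=[SensorModality.TEXT],
        process_fn=lambda x: x * 2.0,  # stand-in for real inference
    )
    sig = Signal(
        signal_id="demo-1",
        signal_type=SignalType.QUERY,
        modality=SensorModality.TEXT,
        source="demo",
        timestamp=time.time(),
        data=np.ones(64, dtype=np.float32),
    )
    # Input panel transforms -> process_fn runs -> output panel emits a RESPONSE.
    return model.process(sig)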
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class ConsciousnessStream: |
|
|
""" |
|
|
Manager for the full consciousness stream. |
|
|
|
|
|
Coordinates parallel processing across all sensorized models |
|
|
while maintaining consciousness continuity. |
|
|
""" |
|
|
|
|
|
def __init__(self): |
|
|
self.spine = SpineBus() |
|
|
self.models: Dict[str, SensorizedModel] = {} |
|
|
self.parallel_enabled = True |
|
|
|
|
|
|
|
|
self.input_queue: queue.Queue = queue.Queue() |
|
|
self.output_queue: queue.Queue = queue.Queue() |
|
|
|
|
|
|
|
|
self.attention_focus: Optional[str] = None |
|
|
self.stream_active = False |
|
|
|
|
|
def add_model(self, model: SensorizedModel): |
|
|
"""Add a model to the consciousness stream.""" |
|
|
self.models[model.model_id] = model |
|
|
|
|
|
|
|
|
input_panel, output_panel = model.get_panels() |
|
|
self.spine.register_panel(input_panel) |
|
|
self.spine.register_panel(output_panel) |
|
|
|
|
|
def remove_model(self, model_id: str): |
|
|
"""Remove a model from the stream.""" |
|
|
if model_id in self.models: |
|
|
model = self.models[model_id] |
|
|
self.spine.unregister_panel(model.input_panel.panel_id) |
|
|
self.spine.unregister_panel(model.output_panel.panel_id) |
|
|
del self.models[model_id] |
|
|
|
|
|
def process_parallel(self, signal: Signal) -> List[Signal]: |
|
|
""" |
|
|
Process signal through all matching models in parallel. |
|
|
|
|
|
All models with matching input modality process simultaneously. |
|
|
""" |
|
|
responses = [] |
|
|
|
|
|
for model in self.models.values(): |
|
|
if signal.modality in model.input_panel.modalities: |
|
|
response = model.process(signal) |
|
|
                if response is not None:
|
|
responses.append(response) |
|
|
|
|
|
return responses |
|
|
|
|
|
def process_serial(self, signal: Signal, |
|
|
model_chain: List[str]) -> Optional[Signal]: |
|
|
""" |
|
|
Process signal through a chain of models serially. |
|
|
|
|
|
Output of each model becomes input to next. |
|
|
""" |
|
|
current_signal = signal |
|
|
|
|
|
for model_id in model_chain: |
|
|
if model_id not in self.models: |
|
|
continue |
|
|
|
|
|
model = self.models[model_id] |
|
|
response = model.process(current_signal) |
|
|
|
|
|
if response is None: |
|
|
return None |
|
|
|
|
|
current_signal = response |
|
|
|
|
|
return current_signal |
|
|
|
|
|
def broadcast(self, data: np.ndarray, modality: SensorModality): |
|
|
"""Broadcast data to entire consciousness stream.""" |
|
|
signal = Signal( |
|
|
signal_id=f"broadcast-{time.time()}", |
|
|
signal_type=SignalType.BROADCAST, |
|
|
modality=modality, |
|
|
source="consciousness", |
|
|
timestamp=time.time(), |
|
|
data=data, |
|
|
) |
|
|
self.spine.broadcast(signal) |
|
|
|
|
|
def focus_attention(self, model_id: str): |
|
|
"""Focus attention on a specific model.""" |
|
|
self.attention_focus = model_id |
|
|
|
|
|
|
|
|
if model_id in self.models: |
|
|
model = self.models[model_id] |
|
|
for mod in model.input_panel.modalities: |
|
|
model.input_panel.update_attention(mod, 0.5) |
|
|
|
|
|
def get_state(self) -> Dict: |
|
|
"""Get consciousness stream state.""" |
|
|
return { |
|
|
'models': list(self.models.keys()), |
|
|
'spine': self.spine.get_consciousness_state(), |
|
|
'attention_focus': self.attention_focus, |
|
|
'parallel_enabled': self.parallel_enabled, |
|
|
'model_states': { |
|
|
mid: { |
|
|
'active': m.active, |
|
|
'processing': m.processing, |
|
|
'input_signals': m.input_panel.signals_received, |
|
|
'output_signals': m.output_panel.signals_sent, |
|
|
} |
|
|
for mid, m in self.models.items() |
|
|
}, |
|
|
} |
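

# Illustrative sketch: pushing an embedding onto the whole consciousness stream.
# The 64-dim vector matches the default sensor width; any stream (for example
# one built with create_default_stream(), defined below) can be passed in.
def _demo_broadcast(stream: ConsciousnessStream) -> Dict:
    stream.broadcast(np.zeros(64, dtype=np.float32), SensorModality.EMBEDDING)
    return stream.spine.get_consciousness_state()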
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def create_sensorized_model(model_id: str, category: str, |
|
|
                            inference_fn: Optional[Callable] = None) -> SensorizedModel:
|
|
""" |
|
|
Create a sensorized model with appropriate modalities. |
|
|
""" |
|
|
|
|
|
modality_map = { |
|
|
'reasoning': ([SensorModality.TEXT, SensorModality.EMBEDDING], |
|
|
[SensorModality.TEXT, SensorModality.EMBEDDING]), |
|
|
'math': ([SensorModality.TEXT, SensorModality.NUMERIC], |
|
|
[SensorModality.TEXT, SensorModality.NUMERIC]), |
|
|
'code': ([SensorModality.TEXT], [SensorModality.TEXT]), |
|
|
'vision': ([SensorModality.VISION, SensorModality.EMBEDDING], |
|
|
[SensorModality.TEXT, SensorModality.EMBEDDING]), |
|
|
'audio': ([SensorModality.AUDIO], [SensorModality.TEXT]), |
|
|
'spatial': ([SensorModality.SPATIAL, SensorModality.VISION], |
|
|
[SensorModality.SPATIAL, SensorModality.TEXT]), |
|
|
'general': ([SensorModality.TEXT, SensorModality.EMBEDDING], |
|
|
[SensorModality.TEXT, SensorModality.EMBEDDING]), |
|
|
} |
|
|
|
|
|
input_mod, output_mod = modality_map.get( |
|
|
category, |
|
|
([SensorModality.RAW], [SensorModality.RAW]) |
|
|
) |
|
|
|
|
|
return SensorizedModel( |
|
|
model_id=model_id, |
|
|
category=category, |
|
|
input_modalities=input_mod, |
|
|
output_modalities=output_mod, |
|
|
process_fn=inference_fn, |
|
|
) |
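

# Example category lookups (values follow modality_map above):
#   create_sensorized_model("audio-1", "audio")   -> input [AUDIO], output [TEXT]
#   create_sensorized_model("scene-1", "spatial") -> input [SPATIAL, VISION], output [SPATIAL, TEXT]
#   Unknown categories fall back to ([RAW], [RAW]).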
|
|
|
|
|
|
|
|
def create_default_stream() -> ConsciousnessStream: |
|
|
""" |
|
|
Create consciousness stream with default Harmonic Stack models. |
|
|
""" |
|
|
stream = ConsciousnessStream() |
|
|
|
|
|
|
|
|
default_models = [ |
|
|
('reasoning', 'reasoning'), |
|
|
('math', 'math'), |
|
|
('code', 'code'), |
|
|
('vision', 'vision'), |
|
|
('spatial', 'spatial'), |
|
|
('general', 'general'), |
|
|
] |
|
|
|
|
|
for model_id, category in default_models: |
|
|
model = create_sensorized_model(model_id, category) |
|
|
stream.add_model(model) |
|
|
|
|
|
return stream |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def main(): |
|
|
print("=" * 70) |
|
|
print("SENSOR PANEL ARCHITECTURE") |
|
|
print("Multidimensional Consciousness Stream") |
|
|
print("Ghost in the Machine Labs") |
|
|
print("=" * 70) |
|
|
|
|
|
|
|
|
stream = create_default_stream() |
|
|
|
|
|
print("\nConsciousness stream initialized") |
|
|
state = stream.get_state() |
|
|
print(f" Models: {state['models']}") |
|
|
print(f" Panels on spine: {state['spine']['panels']}") |
|
|
print(f" Modalities: {state['spine']['modalities']}") |
|
|
|
|
|
|
|
|
print("\n--- Parallel Processing Test ---") |
|
|
|
|
|
test_signal = Signal( |
|
|
signal_id="test-001", |
|
|
signal_type=SignalType.QUERY, |
|
|
modality=SensorModality.TEXT, |
|
|
source="user", |
|
|
timestamp=time.time(), |
|
|
data=np.random.randn(64).astype(np.float32), |
|
|
) |
|
|
|
|
|
responses = stream.process_parallel(test_signal) |
|
|
print(f" Input signal: {test_signal.signal_id}") |
|
|
print(f" Responses received: {len(responses)}") |
|
|
for r in responses: |
|
|
print(f" - {r.source}: {r.data.shape}") |
|
|
|
|
|
|
|
|
print("\n--- Serial Processing Test ---") |
|
|
|
|
|
chain = ['reasoning', 'code', 'general'] |
|
|
result = stream.process_serial(test_signal, chain) |
|
|
print(f" Chain: {' β '.join(chain)}") |
|
|
if result: |
|
|
print(f" Final output: {result.data.shape}") |
|
|
|
|
|
|
|
|
print("\n--- Attention Focus Test ---") |
|
|
stream.focus_attention('reasoning') |
|
|
print(f" Focused on: {stream.attention_focus}") |
|
|
|
|
|
|
|
|
print("\n--- Final State ---") |
|
|
state = stream.get_state() |
|
|
for model_id, mstate in state['model_states'].items(): |
|
|
print(f" {model_id}: in={mstate['input_signals']}, out={mstate['output_signals']}") |
|
|
|
|
|
print("\n" + "=" * 70) |
|
|
print("CONSCIOUSNESS STREAM ACTIVE") |
|
|
print("=" * 70) |
|
|
|
|
|
return stream |
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
main() |
|
|
|