walidsobhie-code Claude Opus 4.6 commited on
Commit ·
bfc7d04
1
Parent(s): 49ffe54
feat: add TypeScript modules and reorganize project structure
Browse filesNew Features:
- Voice Integration: TypeScript modules for voice recording, synthesis, cloning
- LLM Service: Multi-provider client (OpenAI, Anthropic, Ollama)
- MCP Client: Model Context Protocol integration
- Code Indexing: Semantic code search (RAG)
Training Data:
- Added 46 tool schemas (from RTMP extraction)
- Added RTMP-derived training examples
- Updated manifest to v0.3.0
Project Organization:
- Added src/voice, src/llm, src/mcp, src/indexing modules
- Created DIRECTORY_STRUCTURE.md documentation
- Moved docs to docs/guides and docs/reference
- Added TypeScript configuration
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
- .env.example +16 -0
- DIRECTORY_STRUCTURE.md +120 -0
- README.md +6 -1
- a.txt +0 -1
- b.txt +0 -1
- colab_train_stack29.ipynb +125 -149
- COLAB_TRAINING.md → docs/guides/COLAB_TRAINING.md +0 -0
- EVALUATION.md → docs/guides/EVALUATION.md +0 -0
- BENCHMARKS.md → docs/reference/BENCHMARKS.md +0 -0
- MODEL_CARD.md → docs/reference/MODEL_CARD.md +0 -0
- TOOLS.md → docs/reference/TOOLS.md +0 -0
- demo_stack.py → examples/demo_stack.py +0 -0
- inference_examples.py → examples/inference_examples.py +0 -0
- new.txt +0 -1
- ollama_model/README.md +0 -172
- package-lock.json +1844 -0
- package.json +16 -0
- convert_gguf.py → scripts/convert_gguf.py +0 -0
- scripts/extract_patterns_from_git.py +457 -0
- scripts/extract_rtmp_tools.ts +115 -0
- scripts/fuse_lora_adapters.py +413 -0
- scripts/generate_from_rtmp.ts +114 -0
- src/examples/voice-integration.ts +149 -0
- src/indexing/CodeIndexer.ts +284 -0
- src/indexing/index.ts +18 -0
- src/llm/LLMService.ts +354 -0
- src/llm/index.ts +26 -0
- src/mcp/MCPClient.ts +265 -0
- src/mcp/index.ts +21 -0
- stack-2.9-cli.py → src/stack-2.9-cli.py +0 -0
- stack.py → src/stack.py +0 -0
- src/utils/logger.ts +52 -0
- src/voice/VoiceApiClient.ts +167 -0
- src/voice/VoiceRecording.ts +186 -0
- src/voice/VoiceTools.ts +282 -0
- src/voice/index.ts +109 -0
- stack-2.9-training/prepare_dataset.py +291 -50
- stack-2.9-training/run_training.py +348 -0
- stack-2.9-training/train_config.yaml +24 -15
- stack-2.9-training/train_lora.py +6 -2
- stack_cli/tools.py +1 -1
- test-results/test_results.xml +0 -112
- test.py +0 -1
- test_imports.py +0 -51
- training-data/manifest.json +11 -10
- training-data/src-derived/rtmp_examples.jsonl +3 -0
- training-data/tools/catalog.json +101 -0
- tsconfig.json +22 -0
.env.example
CHANGED
|
@@ -6,3 +6,19 @@ OUTPUT_DIR=./output
|
|
| 6 |
|
| 7 |
# Audio Settings
|
| 8 |
SAMPLE_RATE=44100
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
# Audio Settings
|
| 8 |
SAMPLE_RATE=44100
|
| 9 |
+
|
| 10 |
+
# Voice API (TypeScript client)
|
| 11 |
+
VOICE_API_URL=http://localhost:8000
|
| 12 |
+
VOICE_TIMEOUT=30000
|
| 13 |
+
|
| 14 |
+
# LLM Providers
|
| 15 |
+
OPENAI_API_KEY=sk-...
|
| 16 |
+
OPENAI_MODEL=gpt-4
|
| 17 |
+
ANTHROPIC_API_KEY=sk-ant-...
|
| 18 |
+
ANTHROPIC_MODEL=claude-3-sonnet-20240229
|
| 19 |
+
OLLAMA_BASE_URL=http://localhost:11434
|
| 20 |
+
OLLAMA_MODEL=llama2
|
| 21 |
+
|
| 22 |
+
# Voice API Configuration
|
| 23 |
+
VOICE_API_URL=http://localhost:8000
|
| 24 |
+
VOICE_TIMEOUT=30000
|
DIRECTORY_STRUCTURE.md
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Stack 2.9 Directory Structure
|
| 2 |
+
|
| 3 |
+
This document describes the organized structure of the Stack 2.9 project.
|
| 4 |
+
|
| 5 |
+
## Directory Overview
|
| 6 |
+
|
| 7 |
+
```
|
| 8 |
+
stack-2.9/
|
| 9 |
+
├── src/ # TypeScript source code (voice, LLM, MCP, indexing modules)
|
| 10 |
+
├── docs/ # Project documentation
|
| 11 |
+
├── training-data/ # Training datasets and manifests
|
| 12 |
+
├── scripts/ # Build and utility scripts
|
| 13 |
+
├── tests/ # Test files
|
| 14 |
+
├── config/ # Configuration files (package.json, tsconfig.json, etc.)
|
| 15 |
+
│
|
| 16 |
+
├── stack-2.9-training/ # Model training code
|
| 17 |
+
├── stack-2.9-deploy/ # Deployment configurations
|
| 18 |
+
├── stack-2.9-eval/ # Evaluation and benchmarking
|
| 19 |
+
├── stack-2.9-voice/ # Voice API server (Python)
|
| 20 |
+
├── stack-2.9-docs/ # Generated documentation
|
| 21 |
+
│
|
| 22 |
+
├── examples/ # Example usage files
|
| 23 |
+
├── benchmarks/ # Benchmark scripts
|
| 24 |
+
└── .github/ # GitHub Actions workflows
|
| 25 |
+
```
|
| 26 |
+
|
| 27 |
+
## Directory Details
|
| 28 |
+
|
| 29 |
+
### `src/` - TypeScript Source Code
|
| 30 |
+
|
| 31 |
+
Core modules for Stack 2.9 AI assistant:
|
| 32 |
+
|
| 33 |
+
- **src/voice/** - Voice integration (recording, synthesis, cloning)
|
| 34 |
+
- **src/llm/** - Multi-provider LLM client (OpenAI, Anthropic, Ollama)
|
| 35 |
+
- **src/mcp/** - Model Context Protocol client
|
| 36 |
+
- **src/indexing/** - Code indexing for semantic search (RAG)
|
| 37 |
+
- **src/tools/** - Tool implementations
|
| 38 |
+
- **src/agent/** - Agent logic
|
| 39 |
+
- **src/providers/** - Provider integrations
|
| 40 |
+
|
| 41 |
+
### `docs/` - Documentation
|
| 42 |
+
|
| 43 |
+
- ARCHITECTURE.md - System architecture
|
| 44 |
+
- SETUP.md - Setup instructions
|
| 45 |
+
- API.md - API documentation
|
| 46 |
+
- TOOLS.md - Tool reference
|
| 47 |
+
- BENCHMARKS.md - Performance benchmarks
|
| 48 |
+
- guides/ - Usage guides
|
| 49 |
+
|
| 50 |
+
### `training-data/` - Training Datasets
|
| 51 |
+
|
| 52 |
+
- training-data/tools/catalog.json - Tool schemas (46 tools)
|
| 53 |
+
- training-data/synthetic/ - Synthetic training examples
|
| 54 |
+
- training-data/code-pairs/ - Code-comment pairs
|
| 55 |
+
- training-data/src-derived/ - RTMP-extracted examples
|
| 56 |
+
- training-data/final/ - Final merged datasets
|
| 57 |
+
|
| 58 |
+
### `scripts/` - Utility Scripts
|
| 59 |
+
|
| 60 |
+
- scripts/extract_rtmp_tools.ts - Extract tool schemas from RTMP
|
| 61 |
+
- scripts/generate_from_rtmp.ts - Generate training data from RTMP
|
| 62 |
+
- scripts/combine_datasets.py - Merge training datasets
|
| 63 |
+
- scripts/download_public_datasets.py - Download public datasets
|
| 64 |
+
|
| 65 |
+
### `stack-2.9-*` - Component Directories
|
| 66 |
+
|
| 67 |
+
- **stack-2.9-training/** - Model fine-tuning code (LoRA, quantization)
|
| 68 |
+
- **stack-2.9-deploy/** - Docker and deployment configs
|
| 69 |
+
- **stack-2.9-eval/** - Human eval and benchmarks
|
| 70 |
+
- **stack-2.9-voice/** - Python FastAPI voice server
|
| 71 |
+
- **stack-2.9-docs/** - Auto-generated docs
|
| 72 |
+
|
| 73 |
+
## Configuration Files
|
| 74 |
+
|
| 75 |
+
| File | Purpose |
|
| 76 |
+
|------|---------|
|
| 77 |
+
| package.json | npm dependencies |
|
| 78 |
+
| tsconfig.json | TypeScript config |
|
| 79 |
+
| pyproject.toml | Python project config |
|
| 80 |
+
| requirements.txt | Python dependencies |
|
| 81 |
+
| .env.example | Environment variables template |
|
| 82 |
+
| Makefile | Build targets |
|
| 83 |
+
|
| 84 |
+
## Root Documentation Files
|
| 85 |
+
|
| 86 |
+
- README.md - Main project readme
|
| 87 |
+
- CHANGELOG.md - Version history
|
| 88 |
+
- CONTRIBUTING.md - Contribution guidelines
|
| 89 |
+
- LICENSE - Apache 2.0 license
|
| 90 |
+
- SECURITY.md - Security policy
|
| 91 |
+
|
| 92 |
+
## Deprecated/Merged Directories
|
| 93 |
+
|
| 94 |
+
The following directories are deprecated and their contents moved to other locations:
|
| 95 |
+
|
| 96 |
+
- `stack-2.9-cli/` → merged into `src/cli/`
|
| 97 |
+
- `stack_cli/` → merged into `src/cli/`
|
| 98 |
+
- `stack_2_9_training/` → merged into `stack-2.9-training/`
|
| 99 |
+
- `space/` → use `stack-2.9-deploy/`
|
| 100 |
+
- `self_evolution/` → use `stack-2.9-training/`
|
| 101 |
+
- `website/` → external repository
|
| 102 |
+
- `benchmarks/` → use `stack-2.9-eval/`
|
| 103 |
+
|
| 104 |
+
## Getting Started
|
| 105 |
+
|
| 106 |
+
1. **Install dependencies**: `npm install && pip install -r requirements.txt`
|
| 107 |
+
2. **Configure environment**: Copy `.env.example` to `.env`
|
| 108 |
+
3. **Run voice server**: `cd stack-2.9-voice && uvicorn voice_server:app`
|
| 109 |
+
4. **Use TypeScript modules**: Import from `src/`
|
| 110 |
+
|
| 111 |
+
## Adding New Modules
|
| 112 |
+
|
| 113 |
+
New TypeScript modules should follow this structure:
|
| 114 |
+
|
| 115 |
+
```
|
| 116 |
+
src/<module-name>/
|
| 117 |
+
├── index.ts # Main exports
|
| 118 |
+
├── <ModuleName>.ts # Main implementation
|
| 119 |
+
└── <ModuleName>Tool.ts # Tool implementation (if applicable)
|
| 120 |
+
```
|
README.md
CHANGED
|
@@ -27,8 +27,13 @@ Stack 2.9 is an open-source AI coding assistant powered by Qwen2.5-Coder-32B. It
|
|
| 27 |
| Feature | Description |
|
| 28 |
|---------|-------------|
|
| 29 |
| **🧠 Pattern Memory** | Learns from interactions. Stores successful patterns, tracks success rates, and retrieves relevant precedents for new tasks |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
| **💻 Code Generation** | Evaluation in progress (see Benchmarks section) |
|
| 31 |
-
| **🔧
|
| 32 |
| **🌐 Multi-Provider** | Works with Ollama, OpenAI, Anthropic, OpenRouter, Together AI — or bring your own model |
|
| 33 |
| **📱 Terminal UI** | Beautiful interactive CLI with chat, benchmarks, and training |
|
| 34 |
| **🔒 Self-Hosted** | Run locally, own your data, deploy anywhere |
|
|
|
|
| 27 |
| Feature | Description |
|
| 28 |
|---------|-------------|
|
| 29 |
| **🧠 Pattern Memory** | Learns from interactions. Stores successful patterns, tracks success rates, and retrieves relevant precedents for new tasks |
|
| 30 |
+
| **🔊 Voice Integration** | Voice cloning and TTS with Coqui XTTS. Record voice commands and hear responses |
|
| 31 |
+
| **🎤 Speech-to-Text** | Voice recording with microphone input, silence detection |
|
| 32 |
+
| **🤖 Multi-Provider LLM** | Works with Ollama, OpenAI, Anthropic - unified client with automatic fallback |
|
| 33 |
+
| **🔗 MCP Support** | Model Context Protocol integration for extensible tools |
|
| 34 |
+
| **🔍 Code Indexing (RAG)** | Semantic code search - index your codebase for intelligent queries |
|
| 35 |
| **💻 Code Generation** | Evaluation in progress (see Benchmarks section) |
|
| 36 |
+
| **🔧 46 Built-in Tools** | File ops, search, shell commands, git, voice tools, MCP tools |
|
| 37 |
| **🌐 Multi-Provider** | Works with Ollama, OpenAI, Anthropic, OpenRouter, Together AI — or bring your own model |
|
| 38 |
| **📱 Terminal UI** | Beautiful interactive CLI with chat, benchmarks, and training |
|
| 39 |
| **🔒 Self-Hosted** | Run locally, own your data, deploy anywhere |
|
a.txt
DELETED
|
@@ -1 +0,0 @@
|
|
| 1 |
-
x
|
|
|
|
|
|
b.txt
DELETED
|
@@ -1 +0,0 @@
|
|
| 1 |
-
y
|
|
|
|
|
|
colab_train_stack29.ipynb
CHANGED
|
@@ -4,7 +4,7 @@
|
|
| 4 |
"cell_type": "markdown",
|
| 5 |
"metadata": {},
|
| 6 |
"source": [
|
| 7 |
-
"# 🚀 Stack 2.9 - Colab Training Notebook\n",
|
| 8 |
"\n",
|
| 9 |
"**Zero-cost training on Google Colab free tier**\n",
|
| 10 |
"\n",
|
|
@@ -12,14 +12,16 @@
|
|
| 12 |
"\n",
|
| 13 |
"⏱️ **Expected runtime:** 3-5 hours\n",
|
| 14 |
"💾 **VRAM needed:** ~12GB (fits in T4's 15GB)\n",
|
| 15 |
-
"📦 **Output:** `./
|
| 16 |
"\n",
|
| 17 |
"---\n",
|
| 18 |
"\n",
|
|
|
|
|
|
|
| 19 |
"**Instructions:**\n",
|
| 20 |
"1. Runtime → Change runtime type → **GPU (T4)**\n",
|
| 21 |
-
"2. Run
|
| 22 |
-
"3.
|
| 23 |
"\n",
|
| 24 |
"---"
|
| 25 |
]
|
|
@@ -30,7 +32,7 @@
|
|
| 30 |
"metadata": {},
|
| 31 |
"outputs": [],
|
| 32 |
"source": [
|
| 33 |
-
"# Check GPU
|
| 34 |
"!nvidia-smi"
|
| 35 |
]
|
| 36 |
},
|
|
@@ -38,9 +40,11 @@
|
|
| 38 |
"cell_type": "markdown",
|
| 39 |
"metadata": {},
|
| 40 |
"source": [
|
| 41 |
-
"## 1️⃣ Mount Google Drive (
|
|
|
|
|
|
|
| 42 |
"\n",
|
| 43 |
-
"
|
| 44 |
]
|
| 45 |
},
|
| 46 |
{
|
|
@@ -52,17 +56,22 @@
|
|
| 52 |
"from google.colab import drive\n",
|
| 53 |
"drive.mount('/content/drive')\n",
|
| 54 |
"\n",
|
| 55 |
-
"# Set
|
| 56 |
-
"
|
| 57 |
-
"\n",
|
| 58 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
]
|
| 60 |
},
|
| 61 |
{
|
| 62 |
"cell_type": "markdown",
|
| 63 |
"metadata": {},
|
| 64 |
"source": [
|
| 65 |
-
"## 2️⃣ Clone
|
| 66 |
]
|
| 67 |
},
|
| 68 |
{
|
|
@@ -71,32 +80,43 @@
|
|
| 71 |
"metadata": {},
|
| 72 |
"outputs": [],
|
| 73 |
"source": [
|
| 74 |
-
"
|
| 75 |
-
"os.chdir('/content')\n",
|
| 76 |
-
"\n",
|
| 77 |
-
"# Clone the Stack 2.9 repository if not already present\n",
|
| 78 |
"if not os.path.exists('stack-2.9'):\n",
|
| 79 |
" !git clone https://github.com/my-ai-stack/stack-2.9.git\n",
|
| 80 |
"\n",
|
| 81 |
-
"os.chdir('
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
"\n",
|
| 83 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 84 |
"!pip install --upgrade pip\n",
|
| 85 |
"!pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118\n",
|
| 86 |
-
"!pip install transformers==4.40.0 peft==0.10.0 accelerate bitsandbytes==0.43.0 datasets pyyaml"
|
|
|
|
| 87 |
]
|
| 88 |
},
|
| 89 |
{
|
| 90 |
"cell_type": "markdown",
|
| 91 |
"metadata": {},
|
| 92 |
"source": [
|
| 93 |
-
"##
|
| 94 |
-
"\n",
|
| 95 |
-
"### Option A: Use existing training data from repository\n",
|
| 96 |
-
"The repo already has `training-data/final/train.jsonl` and `eval.jsonl` if you previously ran data collection.\n",
|
| 97 |
"\n",
|
| 98 |
-
"
|
| 99 |
-
"Recommended for first run to verify everything works quickly."
|
| 100 |
]
|
| 101 |
},
|
| 102 |
{
|
|
@@ -105,23 +125,19 @@
|
|
| 105 |
"metadata": {},
|
| 106 |
"outputs": [],
|
| 107 |
"source": [
|
| 108 |
-
"# Create mini dataset (5K examples)\n",
|
| 109 |
"!python scripts/create_mini_dataset.py --size 5000 --output data_mini/train_mini.jsonl --source training-data/final/train.jsonl\n",
|
| 110 |
"\n",
|
| 111 |
-
"#
|
| 112 |
-
"!ls -lh data_mini/
|
| 113 |
-
"\n",
|
| 114 |
-
"# If you want to use the full dataset instead, skip the mini creation and use:\n",
|
| 115 |
-
"# training-data/final/train.jsonl (and eval.jsonl if available)"
|
| 116 |
]
|
| 117 |
},
|
| 118 |
{
|
| 119 |
"cell_type": "markdown",
|
| 120 |
"metadata": {},
|
| 121 |
"source": [
|
| 122 |
-
"##
|
| 123 |
"\n",
|
| 124 |
-
"
|
| 125 |
]
|
| 126 |
},
|
| 127 |
{
|
|
@@ -130,56 +146,62 @@
|
|
| 130 |
"metadata": {},
|
| 131 |
"outputs": [],
|
| 132 |
"source": [
|
| 133 |
-
"# Copy the Colab config
|
| 134 |
"!cp stack_2_9_training/train_config_colab.yaml stack_2_9_training/train_config.yaml\n",
|
| 135 |
"\n",
|
| 136 |
-
"#
|
| 137 |
-
"
|
| 138 |
-
"
|
| 139 |
-
"
|
| 140 |
-
"\n",
|
| 141 |
-
"print(\"
|
| 142 |
-
"
|
| 143 |
-
" lines = f.readlines()\n",
|
| 144 |
-
" for i, line in enumerate(lines[:50]): # Show first 50 lines\n",
|
| 145 |
-
" print(f\"{i+1}: {line.rstrip()}\")\n",
|
| 146 |
-
"print(\"...\")"
|
| 147 |
]
|
| 148 |
},
|
| 149 |
{
|
| 150 |
"cell_type": "markdown",
|
| 151 |
"metadata": {},
|
| 152 |
"source": [
|
| 153 |
-
"##
|
|
|
|
|
|
|
| 154 |
"\n",
|
| 155 |
-
"
|
| 156 |
"\n",
|
| 157 |
-
"
|
| 158 |
"\n",
|
| 159 |
-
"
|
| 160 |
]
|
| 161 |
},
|
| 162 |
{
|
| 163 |
"cell_type": "code",
|
| 164 |
"execution_count": null,
|
| 165 |
-
"metadata": {
|
| 166 |
-
"tags": []
|
| 167 |
-
},
|
| 168 |
"outputs": [],
|
| 169 |
"source": [
|
| 170 |
-
"
|
| 171 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 172 |
"\n",
|
| 173 |
"!cd stack_2_9_training && python -m train_lora --config train_config.yaml\n",
|
| 174 |
"\n",
|
| 175 |
-
"
|
|
|
|
|
|
|
|
|
|
| 176 |
]
|
| 177 |
},
|
| 178 |
{
|
| 179 |
"cell_type": "markdown",
|
| 180 |
"metadata": {},
|
| 181 |
"source": [
|
| 182 |
-
"##
|
| 183 |
]
|
| 184 |
},
|
| 185 |
{
|
|
@@ -188,21 +210,17 @@
|
|
| 188 |
"metadata": {},
|
| 189 |
"outputs": [],
|
| 190 |
"source": [
|
| 191 |
-
"!ls -
|
| 192 |
-
"\
|
| 193 |
-
"# If training succeeded, you should see:\n",
|
| 194 |
-
"# - adapter_model.bin (or multiple checkpoint-XXX folders)\n",
|
| 195 |
-
"# - training_args.bin\n",
|
| 196 |
-
"# - config.json"
|
| 197 |
]
|
| 198 |
},
|
| 199 |
{
|
| 200 |
"cell_type": "markdown",
|
| 201 |
"metadata": {},
|
| 202 |
"source": [
|
| 203 |
-
"##
|
| 204 |
"\n",
|
| 205 |
-
"Combines the trained adapter
|
| 206 |
]
|
| 207 |
},
|
| 208 |
{
|
|
@@ -221,9 +239,7 @@
|
|
| 221 |
"cell_type": "markdown",
|
| 222 |
"metadata": {},
|
| 223 |
"source": [
|
| 224 |
-
"##
|
| 225 |
-
"\n",
|
| 226 |
-
"Quick sanity check: does the model generate reasonable code?"
|
| 227 |
]
|
| 228 |
},
|
| 229 |
{
|
|
@@ -235,98 +251,57 @@
|
|
| 235 |
"from transformers import AutoTokenizer, AutoModelForCausalLM\n",
|
| 236 |
"import torch\n",
|
| 237 |
"\n",
|
| 238 |
-
"# Load merged model\n",
|
| 239 |
"model_path = \"./model_final\"\n",
|
| 240 |
-
"
|
| 241 |
-
"
|
| 242 |
-
"
|
| 243 |
-
"
|
| 244 |
-
"
|
| 245 |
-
"
|
| 246 |
-
"
|
| 247 |
-
"\n",
|
| 248 |
-
"
|
| 249 |
-
"prompt = \"Write a Python function to calculate factorial recursively:\\n\\n```python\\n\"\n",
|
| 250 |
-
"inputs = tokenizer(prompt, return_tensors=\"pt\").to(model.device)\n",
|
| 251 |
-
"\n",
|
| 252 |
-
"print(\"Generating...\")\n",
|
| 253 |
-
"with torch.no_grad():\n",
|
| 254 |
-
" outputs = model.generate(\n",
|
| 255 |
-
" **inputs,\n",
|
| 256 |
-
" max_new_tokens=200,\n",
|
| 257 |
-
" temperature=0.2,\n",
|
| 258 |
-
" do_sample=True,\n",
|
| 259 |
-
" pad_token_id=tokenizer.eos_token_id\n",
|
| 260 |
" )\n",
|
| 261 |
-
"\n",
|
| 262 |
-
"
|
| 263 |
-
"
|
| 264 |
-
"
|
| 265 |
-
"print(\"
|
| 266 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 267 |
]
|
| 268 |
},
|
| 269 |
{
|
| 270 |
"cell_type": "markdown",
|
| 271 |
"metadata": {},
|
| 272 |
"source": [
|
| 273 |
-
"##
|
| 274 |
-
"\n",
|
| 275 |
-
"If you want to publish your model, push it to Hugging Face and then apply to Together AI."
|
| 276 |
-
]
|
| 277 |
-
},
|
| 278 |
-
{
|
| 279 |
-
"cell_type": "code",
|
| 280 |
-
"execution_count": null,
|
| 281 |
-
"metadata": {},
|
| 282 |
-
"outputs": [],
|
| 283 |
-
"source": [
|
| 284 |
-
"from huggingface_hub import HfApi\n",
|
| 285 |
-
"\n",
|
| 286 |
-
"# You need a Hugging Face account and token\n",
|
| 287 |
-
"HF_TOKEN = input(\"Enter your Hugging Face token: \").strip()\n",
|
| 288 |
-
"\n",
|
| 289 |
-
"api = HfApi(token=HF_TOKEN)\n",
|
| 290 |
-
"\n",
|
| 291 |
-
"# Choose a repo name\n",
|
| 292 |
-
"repo_id = input(\"Enter repository name (e.g., your-org/stack-2.9-7b-lora): \").strip()\n",
|
| 293 |
"\n",
|
| 294 |
-
"
|
| 295 |
-
"\n",
|
| 296 |
-
"# Create repo if needed\n",
|
| 297 |
-
"api.create_repo(repo_id=repo_id, exist_ok=True)\n",
|
| 298 |
-
"\n",
|
| 299 |
-
"# Upload model\n",
|
| 300 |
-
"api.upload_folder(\n",
|
| 301 |
-
" folder_path=\"./model_final\",\n",
|
| 302 |
-
" repo_id=repo_id,\n",
|
| 303 |
-
" repo_type=\"model\"\n",
|
| 304 |
-
")\n",
|
| 305 |
-
"\n",
|
| 306 |
-
"print(f\"\\n✅ Model uploaded to https://huggingface.co/{repo_id}\")\n",
|
| 307 |
-
"\n",
|
| 308 |
-
"# Update docs\n",
|
| 309 |
-
"print(\"\\nNext steps:\")\n",
|
| 310 |
-
"print(\"1. Update TOGETHER_AI.md with your model ID\")\n",
|
| 311 |
-
"print(\"2. Update README.md badges with real scores after evaluation\")\n",
|
| 312 |
-
"print(\"3. Submit to Together AI model submission form\")"
|
| 313 |
-
]
|
| 314 |
-
},
|
| 315 |
-
{
|
| 316 |
-
"cell_type": "markdown",
|
| 317 |
-
"metadata": {},
|
| 318 |
-
"source": [
|
| 319 |
-
"## 🎉 Training Complete!\n",
|
| 320 |
-
"\n",
|
| 321 |
-
"You now have:\n",
|
| 322 |
-
"- ✅ Trained LoRA adapter in `./adapters_colab/`\n",
|
| 323 |
-
"- ✅ Merged full model in `./model_final/`\n",
|
| 324 |
-
"- ✅ Model card and documentation\n",
|
| 325 |
"\n",
|
| 326 |
"**Next steps:**\n",
|
| 327 |
-
"1.
|
| 328 |
-
"2.
|
| 329 |
-
"3.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 330 |
"\n",
|
| 331 |
"**Need help?** See `COLAB_TRAINING.md` for detailed troubleshooting."
|
| 332 |
]
|
|
@@ -335,7 +310,7 @@
|
|
| 335 |
"metadata": {
|
| 336 |
"accelerator": "GPU",
|
| 337 |
"colab": {
|
| 338 |
-
"name": "Stack 2.9 Colab Training",
|
| 339 |
"provenance": []
|
| 340 |
},
|
| 341 |
"kernelspec": {
|
|
@@ -349,3 +324,4 @@
|
|
| 349 |
"nbformat": 4,
|
| 350 |
"nbformat_minor": 0
|
| 351 |
}
|
|
|
|
|
|
| 4 |
"cell_type": "markdown",
|
| 5 |
"metadata": {},
|
| 6 |
"source": [
|
| 7 |
+
"# 🚀 Stack 2.9 - Colab Training Notebook (Fixed)\n",
|
| 8 |
"\n",
|
| 9 |
"**Zero-cost training on Google Colab free tier**\n",
|
| 10 |
"\n",
|
|
|
|
| 12 |
"\n",
|
| 13 |
"⏱️ **Expected runtime:** 3-5 hours\n",
|
| 14 |
"💾 **VRAM needed:** ~12GB (fits in T4's 15GB)\n",
|
| 15 |
+
"📦 **Output:** `./model_final/` (merged)\n",
|
| 16 |
"\n",
|
| 17 |
"---\n",
|
| 18 |
"\n",
|
| 19 |
+
"**CRITICAL:** All data saved to **Google Drive** to persist through disconnects.\n",
|
| 20 |
+
"\n",
|
| 21 |
"**Instructions:**\n",
|
| 22 |
"1. Runtime → Change runtime type → **GPU (T4)**\n",
|
| 23 |
+
"2. Run all cells in order\n",
|
| 24 |
+
"3. **Allow** Drive access when prompted\n",
|
| 25 |
"\n",
|
| 26 |
"---"
|
| 27 |
]
|
|
|
|
| 32 |
"metadata": {},
|
| 33 |
"outputs": [],
|
| 34 |
"source": [
|
| 35 |
+
"# Check GPU\n",
|
| 36 |
"!nvidia-smi"
|
| 37 |
]
|
| 38 |
},
|
|
|
|
| 40 |
"cell_type": "markdown",
|
| 41 |
"metadata": {},
|
| 42 |
"source": [
|
| 43 |
+
"## 1️⃣ Mount Google Drive (REQUIRED for persistence)\n",
|
| 44 |
+
"\n",
|
| 45 |
+
"Click the link, allow access, copy the auth code, paste it, and press Enter.\n",
|
| 46 |
"\n",
|
| 47 |
+
"**Without Drive mounting, training will be lost if Colab disconnects!**"
|
| 48 |
]
|
| 49 |
},
|
| 50 |
{
|
|
|
|
| 56 |
"from google.colab import drive\n",
|
| 57 |
"drive.mount('/content/drive')\n",
|
| 58 |
"\n",
|
| 59 |
+
"# Set up paths on Drive - ALL OUTPUT GOES HERE\n",
|
| 60 |
+
"import os\n",
|
| 61 |
+
"BASE_PATH = \"/content/drive/MyDrive/stack-2.9\"\n",
|
| 62 |
+
"os.makedirs(BASE_PATH, exist_ok=True)\n",
|
| 63 |
+
"os.chdir(BASE_PATH)\n",
|
| 64 |
+
"print(f\"\\n✅ Working directory: {os.getcwd()}\")\n",
|
| 65 |
+
"print(f\"All outputs will be saved to: {BASE_PATH}\")\n",
|
| 66 |
+
"print(\"\\nCurrent folder contents:\")\n",
|
| 67 |
+
"!ls -la"
|
| 68 |
]
|
| 69 |
},
|
| 70 |
{
|
| 71 |
"cell_type": "markdown",
|
| 72 |
"metadata": {},
|
| 73 |
"source": [
|
| 74 |
+
"## 2️⃣ Clone Stack 2.9 Repository"
|
| 75 |
]
|
| 76 |
},
|
| 77 |
{
|
|
|
|
| 80 |
"metadata": {},
|
| 81 |
"outputs": [],
|
| 82 |
"source": [
|
| 83 |
+
"# Clone into Drive if not already there\n",
|
|
|
|
|
|
|
|
|
|
| 84 |
"if not os.path.exists('stack-2.9'):\n",
|
| 85 |
" !git clone https://github.com/my-ai-stack/stack-2.9.git\n",
|
| 86 |
"\n",
|
| 87 |
+
"os.chdir('stack-2.9')\n",
|
| 88 |
+
"print(f\"Now in: {os.getcwd()}\")\n",
|
| 89 |
+
"!ls -la"
|
| 90 |
+
]
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"cell_type": "markdown",
|
| 94 |
+
"metadata": {},
|
| 95 |
+
"source": [
|
| 96 |
+
"## 3️⃣ Install Dependencies\n",
|
| 97 |
"\n",
|
| 98 |
+
"Takes 5-10 minutes."
|
| 99 |
+
]
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"cell_type": "code",
|
| 103 |
+
"execution_count": null,
|
| 104 |
+
"metadata": {},
|
| 105 |
+
"outputs": [],
|
| 106 |
+
"source": [
|
| 107 |
"!pip install --upgrade pip\n",
|
| 108 |
"!pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118\n",
|
| 109 |
+
"!pip install transformers==4.40.0 peft==0.10.0 accelerate bitsandbytes==0.43.0 datasets pyyaml\n",
|
| 110 |
+
"print(\"\\n✅ Dependencies installed\")"
|
| 111 |
]
|
| 112 |
},
|
| 113 |
{
|
| 114 |
"cell_type": "markdown",
|
| 115 |
"metadata": {},
|
| 116 |
"source": [
|
| 117 |
+
"## 4️⃣ Create Mini Dataset (5K examples)\n",
|
|
|
|
|
|
|
|
|
|
| 118 |
"\n",
|
| 119 |
+
"Quick prototyping dataset - takes 1-2 minutes."
|
|
|
|
| 120 |
]
|
| 121 |
},
|
| 122 |
{
|
|
|
|
| 125 |
"metadata": {},
|
| 126 |
"outputs": [],
|
| 127 |
"source": [
|
|
|
|
| 128 |
"!python scripts/create_mini_dataset.py --size 5000 --output data_mini/train_mini.jsonl --source training-data/final/train.jsonl\n",
|
| 129 |
"\n",
|
| 130 |
+
"# Verify\n",
|
| 131 |
+
"!ls -lh data_mini/train_mini.jsonl"
|
|
|
|
|
|
|
|
|
|
| 132 |
]
|
| 133 |
},
|
| 134 |
{
|
| 135 |
"cell_type": "markdown",
|
| 136 |
"metadata": {},
|
| 137 |
"source": [
|
| 138 |
+
"## 5️⃣ Prepare Training Configuration\n",
|
| 139 |
"\n",
|
| 140 |
+
"Use Colab-optimized config and point it to the mini dataset."
|
| 141 |
]
|
| 142 |
},
|
| 143 |
{
|
|
|
|
| 146 |
"metadata": {},
|
| 147 |
"outputs": [],
|
| 148 |
"source": [
|
| 149 |
+
"# Copy the Colab-optimized config\n",
|
| 150 |
"!cp stack_2_9_training/train_config_colab.yaml stack_2_9_training/train_config.yaml\n",
|
| 151 |
"\n",
|
| 152 |
+
"# Update config to use mini dataset paths\n",
|
| 153 |
+
"!sed -i 's|train_file: \"./training-data/train.jsonl\"|train_file: \"./data_mini/train_mini.jsonl\"|' stack_2_9_training/train_config.yaml\n",
|
| 154 |
+
"!sed -i 's|validation_file: \"./training-data/eval.jsonl\"|# validation_file: \"./data_mini/eval_mini.jsonl\" # optional|' stack_2_9_training/train_config.yaml\n",
|
| 155 |
+
"\n",
|
| 156 |
+
"print(\"✅ Config prepared for mini dataset\")\n",
|
| 157 |
+
"print(\"\\nTraining config (data section):\")\n",
|
| 158 |
+
"!grep -A3 'data:' stack_2_9_training/train_config.yaml"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 159 |
]
|
| 160 |
},
|
| 161 |
{
|
| 162 |
"cell_type": "markdown",
|
| 163 |
"metadata": {},
|
| 164 |
"source": [
|
| 165 |
+
"## 6️⃣ Train LoRA Adapter\n",
|
| 166 |
+
"\n",
|
| 167 |
+
"⚠️ **This takes 3-5 hours. DO NOT INTERRUPT.**\n",
|
| 168 |
"\n",
|
| 169 |
+
"If Colab disconnects, reconnect and training will resume from checkpoint automatically.\n",
|
| 170 |
"\n",
|
| 171 |
+
"Watch for `Train loss:` decreasing. It should start ~2.0-3.0 and trend downward.\n",
|
| 172 |
"\n",
|
| 173 |
+
"Checkpoints saved every 500 steps to `./adapters_colab/` (on Drive)."
|
| 174 |
]
|
| 175 |
},
|
| 176 |
{
|
| 177 |
"cell_type": "code",
|
| 178 |
"execution_count": null,
|
| 179 |
+
"metadata": {},
|
|
|
|
|
|
|
| 180 |
"outputs": [],
|
| 181 |
"source": [
|
| 182 |
+
"%env PYTHONUNBUFFERED=1\n",
|
| 183 |
+
"\n",
|
| 184 |
+
"print(\"\\n\" + \"=\"*60)\n",
|
| 185 |
+
"print(\"STARTING TRAINING\")\n",
|
| 186 |
+
"print(\"=\"*60)\n",
|
| 187 |
+
"print(f\"Working directory: {os.getcwd()}\")\n",
|
| 188 |
+
"print(f\"Config: stack_2_9_training/train_config.yaml\")\n",
|
| 189 |
+
"print(f\"Output will be saved to: ./adapters_colab/\")\n",
|
| 190 |
+
"print(\"=\"*60 + \"\\n\")\n",
|
| 191 |
"\n",
|
| 192 |
"!cd stack_2_9_training && python -m train_lora --config train_config.yaml\n",
|
| 193 |
"\n",
|
| 194 |
+
"print(\"\\n\" + \"=\"*60)\n",
|
| 195 |
+
"print(\"TRAINING FINISHED OR STOPPED\")\n",
|
| 196 |
+
"print(\"Check output above for errors or 'Training completed' message\")\n",
|
| 197 |
+
"print(\"=\"*60)"
|
| 198 |
]
|
| 199 |
},
|
| 200 |
{
|
| 201 |
"cell_type": "markdown",
|
| 202 |
"metadata": {},
|
| 203 |
"source": [
|
| 204 |
+
"## 7️⃣ Verify Training Output"
|
| 205 |
]
|
| 206 |
},
|
| 207 |
{
|
|
|
|
| 210 |
"metadata": {},
|
| 211 |
"outputs": [],
|
| 212 |
"source": [
|
| 213 |
+
"!ls -la adapters_colab/ 2>/dev/null || echo \"⚠️ adapters_colab/ not found - training may have failed or not run yet\"\n",
|
| 214 |
+
"!find adapters_colab -name \"*.bin\" -o -name \"*.safetensors\" 2>/dev/null | head -5 || echo \"No checkpoint files found\""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 215 |
]
|
| 216 |
},
|
| 217 |
{
|
| 218 |
"cell_type": "markdown",
|
| 219 |
"metadata": {},
|
| 220 |
"source": [
|
| 221 |
+
"## 8️⃣ Merge LoRA Adapter with Base Model\n",
|
| 222 |
"\n",
|
| 223 |
+
"Combines the trained adapter with the base model to produce a standalone fine-tuned model."
|
| 224 |
]
|
| 225 |
},
|
| 226 |
{
|
|
|
|
| 239 |
"cell_type": "markdown",
|
| 240 |
"metadata": {},
|
| 241 |
"source": [
|
| 242 |
+
"## 9️⃣ Test Inference (Quick Check)"
|
|
|
|
|
|
|
| 243 |
]
|
| 244 |
},
|
| 245 |
{
|
|
|
|
| 251 |
"from transformers import AutoTokenizer, AutoModelForCausalLM\n",
|
| 252 |
"import torch\n",
|
| 253 |
"\n",
|
|
|
|
| 254 |
"model_path = \"./model_final\"\n",
|
| 255 |
+
"print(f\"Loading model from {model_path}...\")\n",
|
| 256 |
+
"\n",
|
| 257 |
+
"try:\n",
|
| 258 |
+
" tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\n",
|
| 259 |
+
" model = AutoModelForCausalLM.from_pretrained(\n",
|
| 260 |
+
" model_path,\n",
|
| 261 |
+
" torch_dtype=torch.bfloat16,\n",
|
| 262 |
+
" device_map=\"auto\",\n",
|
| 263 |
+
" trust_remote_code=True\n",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 264 |
" )\n",
|
| 265 |
+
" \n",
|
| 266 |
+
" prompt = \"Write a Python function to reverse a string:\\n\\n```python\\n\"\n",
|
| 267 |
+
" inputs = tokenizer(prompt, return_tensors=\"pt\").to(model.device)\n",
|
| 268 |
+
" \n",
|
| 269 |
+
" print(\"Generating...\")\n",
|
| 270 |
+
" with torch.no_grad():\n",
|
| 271 |
+
" outputs = model.generate(\n",
|
| 272 |
+
" **inputs,\n",
|
| 273 |
+
" max_new_tokens=200,\n",
|
| 274 |
+
" temperature=0.2,\n",
|
| 275 |
+
" do_sample=True,\n",
|
| 276 |
+
" pad_token_id=tokenizer.eos_token_id\n",
|
| 277 |
+
" )\n",
|
| 278 |
+
" \n",
|
| 279 |
+
" response = tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
|
| 280 |
+
" print(\"=\\\"*40)\n",
|
| 281 |
+
" print(\"RESPONSE:\")\n",
|
| 282 |
+
" print(\"=\\\"*40)\n",
|
| 283 |
+
" print(response[len(prompt):])\n",
|
| 284 |
+
"except Exception as e:\n",
|
| 285 |
+
" print(f\"❌ Error loading or running model: {e}\")\n",
|
| 286 |
+
" print(\"\\nThis is expected if training hasn't completed yet or model files are missing.\")"
|
| 287 |
]
|
| 288 |
},
|
| 289 |
{
|
| 290 |
"cell_type": "markdown",
|
| 291 |
"metadata": {},
|
| 292 |
"source": [
|
| 293 |
+
"## 🔚 Training Complete!\n",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 294 |
"\n",
|
| 295 |
+
"Your model is ready in `./model_final/` and saved to Google Drive.\n",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 296 |
"\n",
|
| 297 |
"**Next steps:**\n",
|
| 298 |
+
"1. **Download** `model_final/` from Drive to your local machine\n",
|
| 299 |
+
"2. **Download datasets**: `python scripts/download_benchmark_datasets.py --benchmark both`\n",
|
| 300 |
+
"3. **Run evaluation**: `python stack-2.9-eval/run_proper_evaluation.py --benchmark humaneval --provider ollama --model ./model_final --k-samples 100`\n",
|
| 301 |
+
"4. **Upload model** to Hugging Face Hub\n",
|
| 302 |
+
"5. **Apply to Together AI**\n",
|
| 303 |
+
"\n",
|
| 304 |
+
"**Note:** This model was trained on 5K mini dataset (code completion only). For better tool capability, consider training on full dataset with synthetic tool data in a future run.\n",
|
| 305 |
"\n",
|
| 306 |
"**Need help?** See `COLAB_TRAINING.md` for detailed troubleshooting."
|
| 307 |
]
|
|
|
|
| 310 |
"metadata": {
|
| 311 |
"accelerator": "GPU",
|
| 312 |
"colab": {
|
| 313 |
+
"name": "Stack 2.9 Colab Training (Fixed)",
|
| 314 |
"provenance": []
|
| 315 |
},
|
| 316 |
"kernelspec": {
|
|
|
|
| 324 |
"nbformat": 4,
|
| 325 |
"nbformat_minor": 0
|
| 326 |
}
|
| 327 |
+
}
|
COLAB_TRAINING.md → docs/guides/COLAB_TRAINING.md
RENAMED
|
File without changes
|
EVALUATION.md → docs/guides/EVALUATION.md
RENAMED
|
File without changes
|
BENCHMARKS.md → docs/reference/BENCHMARKS.md
RENAMED
|
File without changes
|
MODEL_CARD.md → docs/reference/MODEL_CARD.md
RENAMED
|
File without changes
|
TOOLS.md → docs/reference/TOOLS.md
RENAMED
|
File without changes
|
demo_stack.py → examples/demo_stack.py
RENAMED
|
File without changes
|
inference_examples.py → examples/inference_examples.py
RENAMED
|
File without changes
|
new.txt
DELETED
|
@@ -1 +0,0 @@
|
|
| 1 |
-
x
|
|
|
|
|
|
ollama_model/README.md
DELETED
|
@@ -1,172 +0,0 @@
|
|
| 1 |
-
# Ollama Model Package - Stack 2.9
|
| 2 |
-
|
| 3 |
-
This directory contains the GGUF-formatted version of the **Stack 2.9** model, optimized for use with [Ollama](https://ollama.ai/).
|
| 4 |
-
|
| 5 |
-
## 📁 Contents
|
| 6 |
-
|
| 7 |
-
- `stack-2.9-7b.gguf` - The quantized model file in GGUF format (4-bit, q4_0)
|
| 8 |
-
- (this README)
|
| 9 |
-
|
| 10 |
-
## 🚀 Quick Start
|
| 11 |
-
|
| 12 |
-
### 1. Import the Model
|
| 13 |
-
|
| 14 |
-
```bash
|
| 15 |
-
ollama import ./stack-2.9-7b.gguf --alias stack-2.9:7b
|
| 16 |
-
```
|
| 17 |
-
|
| 18 |
-
### 2. Run the Model
|
| 19 |
-
|
| 20 |
-
```bash
|
| 21 |
-
ollama run stack-2.9:7b
|
| 22 |
-
```
|
| 23 |
-
|
| 24 |
-
## 📦 About the Model
|
| 25 |
-
|
| 26 |
-
- **Name:** Stack 2.9
|
| 27 |
-
- **Size:** 7B parameters
|
| 28 |
-
- **Format:** GGUF (GPU-friendly, CPU+GPU hybrid)
|
| 29 |
-
- **Quantization:** q4_0 (4-bit integer)
|
| 30 |
-
- **Original Base:** Llama 2 or similar foundation
|
| 31 |
-
- **Fine-tuned for:** Code generation, instruction following, and agentic tool use
|
| 32 |
-
|
| 33 |
-
### Why q4_0?
|
| 34 |
-
|
| 35 |
-
- **Smallest size** among quality-preserving quantizations
|
| 36 |
-
- **Fast inference** on CPU and modest GPUs
|
| 37 |
-
- **Good quality** for most use cases
|
| 38 |
-
- Suitable for **local development** and testing
|
| 39 |
-
|
| 40 |
-
If you need higher quality or specific performance characteristics, you can re-quantize using:
|
| 41 |
-
- `q5_0` - Better quality, slightly larger
|
| 42 |
-
- `q8_0` - Near-full precision, 2x size of q4_0
|
| 43 |
-
- `q4_K_M` - Balanced 4-bit with better accuracy
|
| 44 |
-
|
| 45 |
-
## 🔧 Conversion Process
|
| 46 |
-
|
| 47 |
-
The GGUF file was generated using the following steps:
|
| 48 |
-
|
| 49 |
-
1. **Prepare the merged model**
|
| 50 |
-
- The fine-tuned Stack 2.9 model was merged from LoRA adapters into the base model
|
| 51 |
-
- Location: `./output/stack-2.9-7b-merged/` (HuggingFace format)
|
| 52 |
-
|
| 53 |
-
2. **Convert to GGUF with llama.cpp**
|
| 54 |
-
```bash
|
| 55 |
-
python llama.cpp/convert.py ./output/stack-2.9-7b-merged \
|
| 56 |
-
--outfile ./ollama_model/stack-2.9-7b_temp.gguf \
|
| 57 |
-
--outtype f16
|
| 58 |
-
```
|
| 59 |
-
|
| 60 |
-
3. **Quantize to 4-bit (q4_0)**
|
| 61 |
-
```bash
|
| 62 |
-
llama.cpp/quantize ./ollama_model/stack-2.9-7b_temp.gguf \
|
| 63 |
-
./ollama_model/stack-2.9-7b.gguf q4_0
|
| 64 |
-
```
|
| 65 |
-
|
| 66 |
-
4. **Validate**
|
| 67 |
-
- File size check
|
| 68 |
-
- GGUF header validation
|
| 69 |
-
- Ollama import test
|
| 70 |
-
|
| 71 |
-
### Scripts
|
| 72 |
-
|
| 73 |
-
The conversion is automated in:
|
| 74 |
-
```
|
| 75 |
-
scripts/convert_to_gguf.py
|
| 76 |
-
```
|
| 77 |
-
|
| 78 |
-
Usage:
|
| 79 |
-
```bash
|
| 80 |
-
python scripts/convert_to_gguf.py \
|
| 81 |
-
--model-dir ./output/stack-2.9-7b-merged \
|
| 82 |
-
--output ./ollama_model/stack-2.9-7b.gguf \
|
| 83 |
-
--qtype q4_0
|
| 84 |
-
```
|
| 85 |
-
|
| 86 |
-
## ⚙️ Requirements
|
| 87 |
-
|
| 88 |
-
### For Conversion
|
| 89 |
-
|
| 90 |
-
- **Python 3.8+**
|
| 91 |
-
- **llama.cpp** (clone from https://github.com/ggerganov/llama.cpp)
|
| 92 |
-
- Build the `quantize` tool: `make quantize`
|
| 93 |
-
- **PyTorch** (for loading HF model via convert.py)
|
| 94 |
-
- **Transformers** and **safetensors** libraries
|
| 95 |
-
|
| 96 |
-
### For Running (Ollama)
|
| 97 |
-
|
| 98 |
-
- **Ollama** installed (https://ollama.ai/download)
|
| 99 |
-
- Sufficient RAM/VRAM for q4_0 model (~4-5GB recommended)
|
| 100 |
-
|
| 101 |
-
## 🔍 Validation
|
| 102 |
-
|
| 103 |
-
After import, verify the model works:
|
| 104 |
-
|
| 105 |
-
```bash
|
| 106 |
-
# Simple test
|
| 107 |
-
echo "What is Stack 2.9?" | ollama run stack-2.9:7b
|
| 108 |
-
|
| 109 |
-
# Interactive chat
|
| 110 |
-
ollama run stack-2.9:7b
|
| 111 |
-
|
| 112 |
-
# Check model info
|
| 113 |
-
ollama show stack-2.9:7b --modelfile
|
| 114 |
-
```
|
| 115 |
-
|
| 116 |
-
You should see responses indicating the model is functional and recognizes its identity.
|
| 117 |
-
|
| 118 |
-
## 🛠️ Troubleshooting
|
| 119 |
-
|
| 120 |
-
### Import fails: "invalid model file"
|
| 121 |
-
|
| 122 |
-
- Ensure the GGUF file is complete and not corrupted
|
| 123 |
-
- Check that the file size matches expected (~4-5GB for q4_0 7B)
|
| 124 |
-
- Re-run conversion if necessary
|
| 125 |
-
|
| 126 |
-
### Out of memory when running
|
| 127 |
-
|
| 128 |
-
- q4_0 requires ~4-5GB free RAM; close other applications
|
| 129 |
-
- Consider using `--qtype q2_K` for even smaller memory footprint (re-run conversion)
|
| 130 |
-
- Or use `--qtype q8_0` if you have more memory available and want better quality
|
| 131 |
-
|
| 132 |
-
### Conversion fails
|
| 133 |
-
|
| 134 |
-
- Ensure llama.cpp `convert.py` supports your model format (HF transformers)
|
| 135 |
-
- Check that the input model directory contains `config.json` and `pytorch_model.bin` or `.safetensors`
|
| 136 |
-
- Verify llama.cpp is built and `quantize` tool exists
|
| 137 |
-
- Look at error messages from convert.py for specific issues
|
| 138 |
-
|
| 139 |
-
## 📊 File Structure
|
| 140 |
-
|
| 141 |
-
```
|
| 142 |
-
stack-2.9/
|
| 143 |
-
├── scripts/
|
| 144 |
-
│ └── convert_to_gguf.py # Conversion script
|
| 145 |
-
├── output/
|
| 146 |
-
│ └── stack-2.9-7b-merged/ # Merged HuggingFace model (before conversion)
|
| 147 |
-
└── ollama_model/
|
| 148 |
-
├── stack-2.9-7b.gguf # Final GGUF model (this package)
|
| 149 |
-
└── README.md # This file
|
| 150 |
-
```
|
| 151 |
-
|
| 152 |
-
## 📝 Notes
|
| 153 |
-
|
| 154 |
-
- The model was fine-tuned on code generation and agentic tasks
|
| 155 |
-
- Recommended temperature: 0.2-0.7 for coding, 0.7-1.0 for creative tasks
|
| 156 |
-
- System prompt defaults to Stack 2.9's instruction format
|
| 157 |
-
- For production use, consider higher quantization (q6_K, q8_0)
|
| 158 |
-
|
| 159 |
-
## 📜 License
|
| 160 |
-
|
| 161 |
-
Same as the base model used for fine-tuning. See the main repository for license details.
|
| 162 |
-
|
| 163 |
-
## 🤝 Support
|
| 164 |
-
|
| 165 |
-
For issues with:
|
| 166 |
-
- **Stack 2.9 model**: Check the main repository
|
| 167 |
-
- **GGUF conversion**: See llama.cpp documentation
|
| 168 |
-
- **Ollama usage**: https://github.com/ollama/ollama
|
| 169 |
-
|
| 170 |
-
---
|
| 171 |
-
|
| 172 |
-
*Generated by Stack 2.9 conversion pipeline*
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
package-lock.json
ADDED
|
@@ -0,0 +1,1844 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "stack-2.9",
|
| 3 |
+
"version": "1.0.0",
|
| 4 |
+
"lockfileVersion": 3,
|
| 5 |
+
"requires": true,
|
| 6 |
+
"packages": {
|
| 7 |
+
"": {
|
| 8 |
+
"name": "stack-2.9",
|
| 9 |
+
"version": "1.0.0",
|
| 10 |
+
"devDependencies": {
|
| 11 |
+
"@types/node": "^20.10.0",
|
| 12 |
+
"typescript": "^5.3.0",
|
| 13 |
+
"vitest": "^1.2.0"
|
| 14 |
+
}
|
| 15 |
+
},
|
| 16 |
+
"node_modules/@esbuild/aix-ppc64": {
|
| 17 |
+
"version": "0.21.5",
|
| 18 |
+
"resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz",
|
| 19 |
+
"integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==",
|
| 20 |
+
"cpu": [
|
| 21 |
+
"ppc64"
|
| 22 |
+
],
|
| 23 |
+
"dev": true,
|
| 24 |
+
"license": "MIT",
|
| 25 |
+
"optional": true,
|
| 26 |
+
"os": [
|
| 27 |
+
"aix"
|
| 28 |
+
],
|
| 29 |
+
"engines": {
|
| 30 |
+
"node": ">=12"
|
| 31 |
+
}
|
| 32 |
+
},
|
| 33 |
+
"node_modules/@esbuild/android-arm": {
|
| 34 |
+
"version": "0.21.5",
|
| 35 |
+
"resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz",
|
| 36 |
+
"integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==",
|
| 37 |
+
"cpu": [
|
| 38 |
+
"arm"
|
| 39 |
+
],
|
| 40 |
+
"dev": true,
|
| 41 |
+
"license": "MIT",
|
| 42 |
+
"optional": true,
|
| 43 |
+
"os": [
|
| 44 |
+
"android"
|
| 45 |
+
],
|
| 46 |
+
"engines": {
|
| 47 |
+
"node": ">=12"
|
| 48 |
+
}
|
| 49 |
+
},
|
| 50 |
+
"node_modules/@esbuild/android-arm64": {
|
| 51 |
+
"version": "0.21.5",
|
| 52 |
+
"resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz",
|
| 53 |
+
"integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==",
|
| 54 |
+
"cpu": [
|
| 55 |
+
"arm64"
|
| 56 |
+
],
|
| 57 |
+
"dev": true,
|
| 58 |
+
"license": "MIT",
|
| 59 |
+
"optional": true,
|
| 60 |
+
"os": [
|
| 61 |
+
"android"
|
| 62 |
+
],
|
| 63 |
+
"engines": {
|
| 64 |
+
"node": ">=12"
|
| 65 |
+
}
|
| 66 |
+
},
|
| 67 |
+
"node_modules/@esbuild/android-x64": {
|
| 68 |
+
"version": "0.21.5",
|
| 69 |
+
"resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz",
|
| 70 |
+
"integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==",
|
| 71 |
+
"cpu": [
|
| 72 |
+
"x64"
|
| 73 |
+
],
|
| 74 |
+
"dev": true,
|
| 75 |
+
"license": "MIT",
|
| 76 |
+
"optional": true,
|
| 77 |
+
"os": [
|
| 78 |
+
"android"
|
| 79 |
+
],
|
| 80 |
+
"engines": {
|
| 81 |
+
"node": ">=12"
|
| 82 |
+
}
|
| 83 |
+
},
|
| 84 |
+
"node_modules/@esbuild/darwin-arm64": {
|
| 85 |
+
"version": "0.21.5",
|
| 86 |
+
"resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz",
|
| 87 |
+
"integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==",
|
| 88 |
+
"cpu": [
|
| 89 |
+
"arm64"
|
| 90 |
+
],
|
| 91 |
+
"dev": true,
|
| 92 |
+
"license": "MIT",
|
| 93 |
+
"optional": true,
|
| 94 |
+
"os": [
|
| 95 |
+
"darwin"
|
| 96 |
+
],
|
| 97 |
+
"engines": {
|
| 98 |
+
"node": ">=12"
|
| 99 |
+
}
|
| 100 |
+
},
|
| 101 |
+
"node_modules/@esbuild/darwin-x64": {
|
| 102 |
+
"version": "0.21.5",
|
| 103 |
+
"resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz",
|
| 104 |
+
"integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==",
|
| 105 |
+
"cpu": [
|
| 106 |
+
"x64"
|
| 107 |
+
],
|
| 108 |
+
"dev": true,
|
| 109 |
+
"license": "MIT",
|
| 110 |
+
"optional": true,
|
| 111 |
+
"os": [
|
| 112 |
+
"darwin"
|
| 113 |
+
],
|
| 114 |
+
"engines": {
|
| 115 |
+
"node": ">=12"
|
| 116 |
+
}
|
| 117 |
+
},
|
| 118 |
+
"node_modules/@esbuild/freebsd-arm64": {
|
| 119 |
+
"version": "0.21.5",
|
| 120 |
+
"resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz",
|
| 121 |
+
"integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==",
|
| 122 |
+
"cpu": [
|
| 123 |
+
"arm64"
|
| 124 |
+
],
|
| 125 |
+
"dev": true,
|
| 126 |
+
"license": "MIT",
|
| 127 |
+
"optional": true,
|
| 128 |
+
"os": [
|
| 129 |
+
"freebsd"
|
| 130 |
+
],
|
| 131 |
+
"engines": {
|
| 132 |
+
"node": ">=12"
|
| 133 |
+
}
|
| 134 |
+
},
|
| 135 |
+
"node_modules/@esbuild/freebsd-x64": {
|
| 136 |
+
"version": "0.21.5",
|
| 137 |
+
"resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz",
|
| 138 |
+
"integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==",
|
| 139 |
+
"cpu": [
|
| 140 |
+
"x64"
|
| 141 |
+
],
|
| 142 |
+
"dev": true,
|
| 143 |
+
"license": "MIT",
|
| 144 |
+
"optional": true,
|
| 145 |
+
"os": [
|
| 146 |
+
"freebsd"
|
| 147 |
+
],
|
| 148 |
+
"engines": {
|
| 149 |
+
"node": ">=12"
|
| 150 |
+
}
|
| 151 |
+
},
|
| 152 |
+
"node_modules/@esbuild/linux-arm": {
|
| 153 |
+
"version": "0.21.5",
|
| 154 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz",
|
| 155 |
+
"integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==",
|
| 156 |
+
"cpu": [
|
| 157 |
+
"arm"
|
| 158 |
+
],
|
| 159 |
+
"dev": true,
|
| 160 |
+
"license": "MIT",
|
| 161 |
+
"optional": true,
|
| 162 |
+
"os": [
|
| 163 |
+
"linux"
|
| 164 |
+
],
|
| 165 |
+
"engines": {
|
| 166 |
+
"node": ">=12"
|
| 167 |
+
}
|
| 168 |
+
},
|
| 169 |
+
"node_modules/@esbuild/linux-arm64": {
|
| 170 |
+
"version": "0.21.5",
|
| 171 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz",
|
| 172 |
+
"integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==",
|
| 173 |
+
"cpu": [
|
| 174 |
+
"arm64"
|
| 175 |
+
],
|
| 176 |
+
"dev": true,
|
| 177 |
+
"license": "MIT",
|
| 178 |
+
"optional": true,
|
| 179 |
+
"os": [
|
| 180 |
+
"linux"
|
| 181 |
+
],
|
| 182 |
+
"engines": {
|
| 183 |
+
"node": ">=12"
|
| 184 |
+
}
|
| 185 |
+
},
|
| 186 |
+
"node_modules/@esbuild/linux-ia32": {
|
| 187 |
+
"version": "0.21.5",
|
| 188 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz",
|
| 189 |
+
"integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==",
|
| 190 |
+
"cpu": [
|
| 191 |
+
"ia32"
|
| 192 |
+
],
|
| 193 |
+
"dev": true,
|
| 194 |
+
"license": "MIT",
|
| 195 |
+
"optional": true,
|
| 196 |
+
"os": [
|
| 197 |
+
"linux"
|
| 198 |
+
],
|
| 199 |
+
"engines": {
|
| 200 |
+
"node": ">=12"
|
| 201 |
+
}
|
| 202 |
+
},
|
| 203 |
+
"node_modules/@esbuild/linux-loong64": {
|
| 204 |
+
"version": "0.21.5",
|
| 205 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz",
|
| 206 |
+
"integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==",
|
| 207 |
+
"cpu": [
|
| 208 |
+
"loong64"
|
| 209 |
+
],
|
| 210 |
+
"dev": true,
|
| 211 |
+
"license": "MIT",
|
| 212 |
+
"optional": true,
|
| 213 |
+
"os": [
|
| 214 |
+
"linux"
|
| 215 |
+
],
|
| 216 |
+
"engines": {
|
| 217 |
+
"node": ">=12"
|
| 218 |
+
}
|
| 219 |
+
},
|
| 220 |
+
"node_modules/@esbuild/linux-mips64el": {
|
| 221 |
+
"version": "0.21.5",
|
| 222 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz",
|
| 223 |
+
"integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==",
|
| 224 |
+
"cpu": [
|
| 225 |
+
"mips64el"
|
| 226 |
+
],
|
| 227 |
+
"dev": true,
|
| 228 |
+
"license": "MIT",
|
| 229 |
+
"optional": true,
|
| 230 |
+
"os": [
|
| 231 |
+
"linux"
|
| 232 |
+
],
|
| 233 |
+
"engines": {
|
| 234 |
+
"node": ">=12"
|
| 235 |
+
}
|
| 236 |
+
},
|
| 237 |
+
"node_modules/@esbuild/linux-ppc64": {
|
| 238 |
+
"version": "0.21.5",
|
| 239 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz",
|
| 240 |
+
"integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==",
|
| 241 |
+
"cpu": [
|
| 242 |
+
"ppc64"
|
| 243 |
+
],
|
| 244 |
+
"dev": true,
|
| 245 |
+
"license": "MIT",
|
| 246 |
+
"optional": true,
|
| 247 |
+
"os": [
|
| 248 |
+
"linux"
|
| 249 |
+
],
|
| 250 |
+
"engines": {
|
| 251 |
+
"node": ">=12"
|
| 252 |
+
}
|
| 253 |
+
},
|
| 254 |
+
"node_modules/@esbuild/linux-riscv64": {
|
| 255 |
+
"version": "0.21.5",
|
| 256 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz",
|
| 257 |
+
"integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==",
|
| 258 |
+
"cpu": [
|
| 259 |
+
"riscv64"
|
| 260 |
+
],
|
| 261 |
+
"dev": true,
|
| 262 |
+
"license": "MIT",
|
| 263 |
+
"optional": true,
|
| 264 |
+
"os": [
|
| 265 |
+
"linux"
|
| 266 |
+
],
|
| 267 |
+
"engines": {
|
| 268 |
+
"node": ">=12"
|
| 269 |
+
}
|
| 270 |
+
},
|
| 271 |
+
"node_modules/@esbuild/linux-s390x": {
|
| 272 |
+
"version": "0.21.5",
|
| 273 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz",
|
| 274 |
+
"integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==",
|
| 275 |
+
"cpu": [
|
| 276 |
+
"s390x"
|
| 277 |
+
],
|
| 278 |
+
"dev": true,
|
| 279 |
+
"license": "MIT",
|
| 280 |
+
"optional": true,
|
| 281 |
+
"os": [
|
| 282 |
+
"linux"
|
| 283 |
+
],
|
| 284 |
+
"engines": {
|
| 285 |
+
"node": ">=12"
|
| 286 |
+
}
|
| 287 |
+
},
|
| 288 |
+
"node_modules/@esbuild/linux-x64": {
|
| 289 |
+
"version": "0.21.5",
|
| 290 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz",
|
| 291 |
+
"integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==",
|
| 292 |
+
"cpu": [
|
| 293 |
+
"x64"
|
| 294 |
+
],
|
| 295 |
+
"dev": true,
|
| 296 |
+
"license": "MIT",
|
| 297 |
+
"optional": true,
|
| 298 |
+
"os": [
|
| 299 |
+
"linux"
|
| 300 |
+
],
|
| 301 |
+
"engines": {
|
| 302 |
+
"node": ">=12"
|
| 303 |
+
}
|
| 304 |
+
},
|
| 305 |
+
"node_modules/@esbuild/netbsd-x64": {
|
| 306 |
+
"version": "0.21.5",
|
| 307 |
+
"resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz",
|
| 308 |
+
"integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==",
|
| 309 |
+
"cpu": [
|
| 310 |
+
"x64"
|
| 311 |
+
],
|
| 312 |
+
"dev": true,
|
| 313 |
+
"license": "MIT",
|
| 314 |
+
"optional": true,
|
| 315 |
+
"os": [
|
| 316 |
+
"netbsd"
|
| 317 |
+
],
|
| 318 |
+
"engines": {
|
| 319 |
+
"node": ">=12"
|
| 320 |
+
}
|
| 321 |
+
},
|
| 322 |
+
"node_modules/@esbuild/openbsd-x64": {
|
| 323 |
+
"version": "0.21.5",
|
| 324 |
+
"resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz",
|
| 325 |
+
"integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==",
|
| 326 |
+
"cpu": [
|
| 327 |
+
"x64"
|
| 328 |
+
],
|
| 329 |
+
"dev": true,
|
| 330 |
+
"license": "MIT",
|
| 331 |
+
"optional": true,
|
| 332 |
+
"os": [
|
| 333 |
+
"openbsd"
|
| 334 |
+
],
|
| 335 |
+
"engines": {
|
| 336 |
+
"node": ">=12"
|
| 337 |
+
}
|
| 338 |
+
},
|
| 339 |
+
"node_modules/@esbuild/sunos-x64": {
|
| 340 |
+
"version": "0.21.5",
|
| 341 |
+
"resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz",
|
| 342 |
+
"integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==",
|
| 343 |
+
"cpu": [
|
| 344 |
+
"x64"
|
| 345 |
+
],
|
| 346 |
+
"dev": true,
|
| 347 |
+
"license": "MIT",
|
| 348 |
+
"optional": true,
|
| 349 |
+
"os": [
|
| 350 |
+
"sunos"
|
| 351 |
+
],
|
| 352 |
+
"engines": {
|
| 353 |
+
"node": ">=12"
|
| 354 |
+
}
|
| 355 |
+
},
|
| 356 |
+
"node_modules/@esbuild/win32-arm64": {
|
| 357 |
+
"version": "0.21.5",
|
| 358 |
+
"resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz",
|
| 359 |
+
"integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==",
|
| 360 |
+
"cpu": [
|
| 361 |
+
"arm64"
|
| 362 |
+
],
|
| 363 |
+
"dev": true,
|
| 364 |
+
"license": "MIT",
|
| 365 |
+
"optional": true,
|
| 366 |
+
"os": [
|
| 367 |
+
"win32"
|
| 368 |
+
],
|
| 369 |
+
"engines": {
|
| 370 |
+
"node": ">=12"
|
| 371 |
+
}
|
| 372 |
+
},
|
| 373 |
+
"node_modules/@esbuild/win32-ia32": {
|
| 374 |
+
"version": "0.21.5",
|
| 375 |
+
"resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz",
|
| 376 |
+
"integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==",
|
| 377 |
+
"cpu": [
|
| 378 |
+
"ia32"
|
| 379 |
+
],
|
| 380 |
+
"dev": true,
|
| 381 |
+
"license": "MIT",
|
| 382 |
+
"optional": true,
|
| 383 |
+
"os": [
|
| 384 |
+
"win32"
|
| 385 |
+
],
|
| 386 |
+
"engines": {
|
| 387 |
+
"node": ">=12"
|
| 388 |
+
}
|
| 389 |
+
},
|
| 390 |
+
"node_modules/@esbuild/win32-x64": {
|
| 391 |
+
"version": "0.21.5",
|
| 392 |
+
"resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz",
|
| 393 |
+
"integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==",
|
| 394 |
+
"cpu": [
|
| 395 |
+
"x64"
|
| 396 |
+
],
|
| 397 |
+
"dev": true,
|
| 398 |
+
"license": "MIT",
|
| 399 |
+
"optional": true,
|
| 400 |
+
"os": [
|
| 401 |
+
"win32"
|
| 402 |
+
],
|
| 403 |
+
"engines": {
|
| 404 |
+
"node": ">=12"
|
| 405 |
+
}
|
| 406 |
+
},
|
| 407 |
+
"node_modules/@jest/schemas": {
|
| 408 |
+
"version": "29.6.3",
|
| 409 |
+
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
|
| 410 |
+
"integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
|
| 411 |
+
"dev": true,
|
| 412 |
+
"license": "MIT",
|
| 413 |
+
"dependencies": {
|
| 414 |
+
"@sinclair/typebox": "^0.27.8"
|
| 415 |
+
},
|
| 416 |
+
"engines": {
|
| 417 |
+
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
|
| 418 |
+
}
|
| 419 |
+
},
|
| 420 |
+
"node_modules/@jridgewell/sourcemap-codec": {
|
| 421 |
+
"version": "1.5.5",
|
| 422 |
+
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
|
| 423 |
+
"integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
|
| 424 |
+
"dev": true,
|
| 425 |
+
"license": "MIT"
|
| 426 |
+
},
|
| 427 |
+
"node_modules/@rollup/rollup-android-arm-eabi": {
|
| 428 |
+
"version": "4.60.1",
|
| 429 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.1.tgz",
|
| 430 |
+
"integrity": "sha512-d6FinEBLdIiK+1uACUttJKfgZREXrF0Qc2SmLII7W2AD8FfiZ9Wjd+rD/iRuf5s5dWrr1GgwXCvPqOuDquOowA==",
|
| 431 |
+
"cpu": [
|
| 432 |
+
"arm"
|
| 433 |
+
],
|
| 434 |
+
"dev": true,
|
| 435 |
+
"license": "MIT",
|
| 436 |
+
"optional": true,
|
| 437 |
+
"os": [
|
| 438 |
+
"android"
|
| 439 |
+
]
|
| 440 |
+
},
|
| 441 |
+
"node_modules/@rollup/rollup-android-arm64": {
|
| 442 |
+
"version": "4.60.1",
|
| 443 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.1.tgz",
|
| 444 |
+
"integrity": "sha512-YjG/EwIDvvYI1YvYbHvDz/BYHtkY4ygUIXHnTdLhG+hKIQFBiosfWiACWortsKPKU/+dUwQQCKQM3qrDe8c9BA==",
|
| 445 |
+
"cpu": [
|
| 446 |
+
"arm64"
|
| 447 |
+
],
|
| 448 |
+
"dev": true,
|
| 449 |
+
"license": "MIT",
|
| 450 |
+
"optional": true,
|
| 451 |
+
"os": [
|
| 452 |
+
"android"
|
| 453 |
+
]
|
| 454 |
+
},
|
| 455 |
+
"node_modules/@rollup/rollup-darwin-arm64": {
|
| 456 |
+
"version": "4.60.1",
|
| 457 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.1.tgz",
|
| 458 |
+
"integrity": "sha512-mjCpF7GmkRtSJwon+Rq1N8+pI+8l7w5g9Z3vWj4T7abguC4Czwi3Yu/pFaLvA3TTeMVjnu3ctigusqWUfjZzvw==",
|
| 459 |
+
"cpu": [
|
| 460 |
+
"arm64"
|
| 461 |
+
],
|
| 462 |
+
"dev": true,
|
| 463 |
+
"license": "MIT",
|
| 464 |
+
"optional": true,
|
| 465 |
+
"os": [
|
| 466 |
+
"darwin"
|
| 467 |
+
]
|
| 468 |
+
},
|
| 469 |
+
"node_modules/@rollup/rollup-darwin-x64": {
|
| 470 |
+
"version": "4.60.1",
|
| 471 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.1.tgz",
|
| 472 |
+
"integrity": "sha512-haZ7hJ1JT4e9hqkoT9R/19XW2QKqjfJVv+i5AGg57S+nLk9lQnJ1F/eZloRO3o9Scy9CM3wQ9l+dkXtcBgN5Ew==",
|
| 473 |
+
"cpu": [
|
| 474 |
+
"x64"
|
| 475 |
+
],
|
| 476 |
+
"dev": true,
|
| 477 |
+
"license": "MIT",
|
| 478 |
+
"optional": true,
|
| 479 |
+
"os": [
|
| 480 |
+
"darwin"
|
| 481 |
+
]
|
| 482 |
+
},
|
| 483 |
+
"node_modules/@rollup/rollup-freebsd-arm64": {
|
| 484 |
+
"version": "4.60.1",
|
| 485 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.1.tgz",
|
| 486 |
+
"integrity": "sha512-czw90wpQq3ZsAVBlinZjAYTKduOjTywlG7fEeWKUA7oCmpA8xdTkxZZlwNJKWqILlq0wehoZcJYfBvOyhPTQ6w==",
|
| 487 |
+
"cpu": [
|
| 488 |
+
"arm64"
|
| 489 |
+
],
|
| 490 |
+
"dev": true,
|
| 491 |
+
"license": "MIT",
|
| 492 |
+
"optional": true,
|
| 493 |
+
"os": [
|
| 494 |
+
"freebsd"
|
| 495 |
+
]
|
| 496 |
+
},
|
| 497 |
+
"node_modules/@rollup/rollup-freebsd-x64": {
|
| 498 |
+
"version": "4.60.1",
|
| 499 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.1.tgz",
|
| 500 |
+
"integrity": "sha512-KVB2rqsxTHuBtfOeySEyzEOB7ltlB/ux38iu2rBQzkjbwRVlkhAGIEDiiYnO2kFOkJp+Z7pUXKyrRRFuFUKt+g==",
|
| 501 |
+
"cpu": [
|
| 502 |
+
"x64"
|
| 503 |
+
],
|
| 504 |
+
"dev": true,
|
| 505 |
+
"license": "MIT",
|
| 506 |
+
"optional": true,
|
| 507 |
+
"os": [
|
| 508 |
+
"freebsd"
|
| 509 |
+
]
|
| 510 |
+
},
|
| 511 |
+
"node_modules/@rollup/rollup-linux-arm-gnueabihf": {
|
| 512 |
+
"version": "4.60.1",
|
| 513 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.1.tgz",
|
| 514 |
+
"integrity": "sha512-L+34Qqil+v5uC0zEubW7uByo78WOCIrBvci69E7sFASRl0X7b/MB6Cqd1lky/CtcSVTydWa2WZwFuWexjS5o6g==",
|
| 515 |
+
"cpu": [
|
| 516 |
+
"arm"
|
| 517 |
+
],
|
| 518 |
+
"dev": true,
|
| 519 |
+
"license": "MIT",
|
| 520 |
+
"optional": true,
|
| 521 |
+
"os": [
|
| 522 |
+
"linux"
|
| 523 |
+
]
|
| 524 |
+
},
|
| 525 |
+
"node_modules/@rollup/rollup-linux-arm-musleabihf": {
|
| 526 |
+
"version": "4.60.1",
|
| 527 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.1.tgz",
|
| 528 |
+
"integrity": "sha512-n83O8rt4v34hgFzlkb1ycniJh7IR5RCIqt6mz1VRJD6pmhRi0CXdmfnLu9dIUS6buzh60IvACM842Ffb3xd6Gg==",
|
| 529 |
+
"cpu": [
|
| 530 |
+
"arm"
|
| 531 |
+
],
|
| 532 |
+
"dev": true,
|
| 533 |
+
"license": "MIT",
|
| 534 |
+
"optional": true,
|
| 535 |
+
"os": [
|
| 536 |
+
"linux"
|
| 537 |
+
]
|
| 538 |
+
},
|
| 539 |
+
"node_modules/@rollup/rollup-linux-arm64-gnu": {
|
| 540 |
+
"version": "4.60.1",
|
| 541 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.1.tgz",
|
| 542 |
+
"integrity": "sha512-Nql7sTeAzhTAja3QXeAI48+/+GjBJ+QmAH13snn0AJSNL50JsDqotyudHyMbO2RbJkskbMbFJfIJKWA6R1LCJQ==",
|
| 543 |
+
"cpu": [
|
| 544 |
+
"arm64"
|
| 545 |
+
],
|
| 546 |
+
"dev": true,
|
| 547 |
+
"license": "MIT",
|
| 548 |
+
"optional": true,
|
| 549 |
+
"os": [
|
| 550 |
+
"linux"
|
| 551 |
+
]
|
| 552 |
+
},
|
| 553 |
+
"node_modules/@rollup/rollup-linux-arm64-musl": {
|
| 554 |
+
"version": "4.60.1",
|
| 555 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.1.tgz",
|
| 556 |
+
"integrity": "sha512-+pUymDhd0ys9GcKZPPWlFiZ67sTWV5UU6zOJat02M1+PiuSGDziyRuI/pPue3hoUwm2uGfxdL+trT6Z9rxnlMA==",
|
| 557 |
+
"cpu": [
|
| 558 |
+
"arm64"
|
| 559 |
+
],
|
| 560 |
+
"dev": true,
|
| 561 |
+
"license": "MIT",
|
| 562 |
+
"optional": true,
|
| 563 |
+
"os": [
|
| 564 |
+
"linux"
|
| 565 |
+
]
|
| 566 |
+
},
|
| 567 |
+
"node_modules/@rollup/rollup-linux-loong64-gnu": {
|
| 568 |
+
"version": "4.60.1",
|
| 569 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.1.tgz",
|
| 570 |
+
"integrity": "sha512-VSvgvQeIcsEvY4bKDHEDWcpW4Yw7BtlKG1GUT4FzBUlEKQK0rWHYBqQt6Fm2taXS+1bXvJT6kICu5ZwqKCnvlQ==",
|
| 571 |
+
"cpu": [
|
| 572 |
+
"loong64"
|
| 573 |
+
],
|
| 574 |
+
"dev": true,
|
| 575 |
+
"license": "MIT",
|
| 576 |
+
"optional": true,
|
| 577 |
+
"os": [
|
| 578 |
+
"linux"
|
| 579 |
+
]
|
| 580 |
+
},
|
| 581 |
+
"node_modules/@rollup/rollup-linux-loong64-musl": {
|
| 582 |
+
"version": "4.60.1",
|
| 583 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.1.tgz",
|
| 584 |
+
"integrity": "sha512-4LqhUomJqwe641gsPp6xLfhqWMbQV04KtPp7/dIp0nzPxAkNY1AbwL5W0MQpcalLYk07vaW9Kp1PBhdpZYYcEw==",
|
| 585 |
+
"cpu": [
|
| 586 |
+
"loong64"
|
| 587 |
+
],
|
| 588 |
+
"dev": true,
|
| 589 |
+
"license": "MIT",
|
| 590 |
+
"optional": true,
|
| 591 |
+
"os": [
|
| 592 |
+
"linux"
|
| 593 |
+
]
|
| 594 |
+
},
|
| 595 |
+
"node_modules/@rollup/rollup-linux-ppc64-gnu": {
|
| 596 |
+
"version": "4.60.1",
|
| 597 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.1.tgz",
|
| 598 |
+
"integrity": "sha512-tLQQ9aPvkBxOc/EUT6j3pyeMD6Hb8QF2BTBnCQWP/uu1lhc9AIrIjKnLYMEroIz/JvtGYgI9dF3AxHZNaEH0rw==",
|
| 599 |
+
"cpu": [
|
| 600 |
+
"ppc64"
|
| 601 |
+
],
|
| 602 |
+
"dev": true,
|
| 603 |
+
"license": "MIT",
|
| 604 |
+
"optional": true,
|
| 605 |
+
"os": [
|
| 606 |
+
"linux"
|
| 607 |
+
]
|
| 608 |
+
},
|
| 609 |
+
"node_modules/@rollup/rollup-linux-ppc64-musl": {
|
| 610 |
+
"version": "4.60.1",
|
| 611 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.1.tgz",
|
| 612 |
+
"integrity": "sha512-RMxFhJwc9fSXP6PqmAz4cbv3kAyvD1etJFjTx4ONqFP9DkTkXsAMU4v3Vyc5BgzC+anz7nS/9tp4obsKfqkDHg==",
|
| 613 |
+
"cpu": [
|
| 614 |
+
"ppc64"
|
| 615 |
+
],
|
| 616 |
+
"dev": true,
|
| 617 |
+
"license": "MIT",
|
| 618 |
+
"optional": true,
|
| 619 |
+
"os": [
|
| 620 |
+
"linux"
|
| 621 |
+
]
|
| 622 |
+
},
|
| 623 |
+
"node_modules/@rollup/rollup-linux-riscv64-gnu": {
|
| 624 |
+
"version": "4.60.1",
|
| 625 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.1.tgz",
|
| 626 |
+
"integrity": "sha512-QKgFl+Yc1eEk6MmOBfRHYF6lTxiiiV3/z/BRrbSiW2I7AFTXoBFvdMEyglohPj//2mZS4hDOqeB0H1ACh3sBbg==",
|
| 627 |
+
"cpu": [
|
| 628 |
+
"riscv64"
|
| 629 |
+
],
|
| 630 |
+
"dev": true,
|
| 631 |
+
"license": "MIT",
|
| 632 |
+
"optional": true,
|
| 633 |
+
"os": [
|
| 634 |
+
"linux"
|
| 635 |
+
]
|
| 636 |
+
},
|
| 637 |
+
"node_modules/@rollup/rollup-linux-riscv64-musl": {
|
| 638 |
+
"version": "4.60.1",
|
| 639 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.1.tgz",
|
| 640 |
+
"integrity": "sha512-RAjXjP/8c6ZtzatZcA1RaQr6O1TRhzC+adn8YZDnChliZHviqIjmvFwHcxi4JKPSDAt6Uhf/7vqcBzQJy0PDJg==",
|
| 641 |
+
"cpu": [
|
| 642 |
+
"riscv64"
|
| 643 |
+
],
|
| 644 |
+
"dev": true,
|
| 645 |
+
"license": "MIT",
|
| 646 |
+
"optional": true,
|
| 647 |
+
"os": [
|
| 648 |
+
"linux"
|
| 649 |
+
]
|
| 650 |
+
},
|
| 651 |
+
"node_modules/@rollup/rollup-linux-s390x-gnu": {
|
| 652 |
+
"version": "4.60.1",
|
| 653 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.1.tgz",
|
| 654 |
+
"integrity": "sha512-wcuocpaOlaL1COBYiA89O6yfjlp3RwKDeTIA0hM7OpmhR1Bjo9j31G1uQVpDlTvwxGn2nQs65fBFL5UFd76FcQ==",
|
| 655 |
+
"cpu": [
|
| 656 |
+
"s390x"
|
| 657 |
+
],
|
| 658 |
+
"dev": true,
|
| 659 |
+
"license": "MIT",
|
| 660 |
+
"optional": true,
|
| 661 |
+
"os": [
|
| 662 |
+
"linux"
|
| 663 |
+
]
|
| 664 |
+
},
|
| 665 |
+
"node_modules/@rollup/rollup-linux-x64-gnu": {
|
| 666 |
+
"version": "4.60.1",
|
| 667 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.1.tgz",
|
| 668 |
+
"integrity": "sha512-77PpsFQUCOiZR9+LQEFg9GClyfkNXj1MP6wRnzYs0EeWbPcHs02AXu4xuUbM1zhwn3wqaizle3AEYg5aeoohhg==",
|
| 669 |
+
"cpu": [
|
| 670 |
+
"x64"
|
| 671 |
+
],
|
| 672 |
+
"dev": true,
|
| 673 |
+
"license": "MIT",
|
| 674 |
+
"optional": true,
|
| 675 |
+
"os": [
|
| 676 |
+
"linux"
|
| 677 |
+
]
|
| 678 |
+
},
|
| 679 |
+
"node_modules/@rollup/rollup-linux-x64-musl": {
|
| 680 |
+
"version": "4.60.1",
|
| 681 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.1.tgz",
|
| 682 |
+
"integrity": "sha512-5cIATbk5vynAjqqmyBjlciMJl1+R/CwX9oLk/EyiFXDWd95KpHdrOJT//rnUl4cUcskrd0jCCw3wpZnhIHdD9w==",
|
| 683 |
+
"cpu": [
|
| 684 |
+
"x64"
|
| 685 |
+
],
|
| 686 |
+
"dev": true,
|
| 687 |
+
"license": "MIT",
|
| 688 |
+
"optional": true,
|
| 689 |
+
"os": [
|
| 690 |
+
"linux"
|
| 691 |
+
]
|
| 692 |
+
},
|
| 693 |
+
"node_modules/@rollup/rollup-openbsd-x64": {
|
| 694 |
+
"version": "4.60.1",
|
| 695 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.1.tgz",
|
| 696 |
+
"integrity": "sha512-cl0w09WsCi17mcmWqqglez9Gk8isgeWvoUZ3WiJFYSR3zjBQc2J5/ihSjpl+VLjPqjQ/1hJRcqBfLjssREQILw==",
|
| 697 |
+
"cpu": [
|
| 698 |
+
"x64"
|
| 699 |
+
],
|
| 700 |
+
"dev": true,
|
| 701 |
+
"license": "MIT",
|
| 702 |
+
"optional": true,
|
| 703 |
+
"os": [
|
| 704 |
+
"openbsd"
|
| 705 |
+
]
|
| 706 |
+
},
|
| 707 |
+
"node_modules/@rollup/rollup-openharmony-arm64": {
|
| 708 |
+
"version": "4.60.1",
|
| 709 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.1.tgz",
|
| 710 |
+
"integrity": "sha512-4Cv23ZrONRbNtbZa37mLSueXUCtN7MXccChtKpUnQNgF010rjrjfHx3QxkS2PI7LqGT5xXyYs1a7LbzAwT0iCA==",
|
| 711 |
+
"cpu": [
|
| 712 |
+
"arm64"
|
| 713 |
+
],
|
| 714 |
+
"dev": true,
|
| 715 |
+
"license": "MIT",
|
| 716 |
+
"optional": true,
|
| 717 |
+
"os": [
|
| 718 |
+
"openharmony"
|
| 719 |
+
]
|
| 720 |
+
},
|
| 721 |
+
"node_modules/@rollup/rollup-win32-arm64-msvc": {
|
| 722 |
+
"version": "4.60.1",
|
| 723 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.1.tgz",
|
| 724 |
+
"integrity": "sha512-i1okWYkA4FJICtr7KpYzFpRTHgy5jdDbZiWfvny21iIKky5YExiDXP+zbXzm3dUcFpkEeYNHgQ5fuG236JPq0g==",
|
| 725 |
+
"cpu": [
|
| 726 |
+
"arm64"
|
| 727 |
+
],
|
| 728 |
+
"dev": true,
|
| 729 |
+
"license": "MIT",
|
| 730 |
+
"optional": true,
|
| 731 |
+
"os": [
|
| 732 |
+
"win32"
|
| 733 |
+
]
|
| 734 |
+
},
|
| 735 |
+
"node_modules/@rollup/rollup-win32-ia32-msvc": {
|
| 736 |
+
"version": "4.60.1",
|
| 737 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.1.tgz",
|
| 738 |
+
"integrity": "sha512-u09m3CuwLzShA0EYKMNiFgcjjzwqtUMLmuCJLeZWjjOYA3IT2Di09KaxGBTP9xVztWyIWjVdsB2E9goMjZvTQg==",
|
| 739 |
+
"cpu": [
|
| 740 |
+
"ia32"
|
| 741 |
+
],
|
| 742 |
+
"dev": true,
|
| 743 |
+
"license": "MIT",
|
| 744 |
+
"optional": true,
|
| 745 |
+
"os": [
|
| 746 |
+
"win32"
|
| 747 |
+
]
|
| 748 |
+
},
|
| 749 |
+
"node_modules/@rollup/rollup-win32-x64-gnu": {
|
| 750 |
+
"version": "4.60.1",
|
| 751 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.1.tgz",
|
| 752 |
+
"integrity": "sha512-k+600V9Zl1CM7eZxJgMyTUzmrmhB/0XZnF4pRypKAlAgxmedUA+1v9R+XOFv56W4SlHEzfeMtzujLJD22Uz5zg==",
|
| 753 |
+
"cpu": [
|
| 754 |
+
"x64"
|
| 755 |
+
],
|
| 756 |
+
"dev": true,
|
| 757 |
+
"license": "MIT",
|
| 758 |
+
"optional": true,
|
| 759 |
+
"os": [
|
| 760 |
+
"win32"
|
| 761 |
+
]
|
| 762 |
+
},
|
| 763 |
+
"node_modules/@rollup/rollup-win32-x64-msvc": {
|
| 764 |
+
"version": "4.60.1",
|
| 765 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.1.tgz",
|
| 766 |
+
"integrity": "sha512-lWMnixq/QzxyhTV6NjQJ4SFo1J6PvOX8vUx5Wb4bBPsEb+8xZ89Bz6kOXpfXj9ak9AHTQVQzlgzBEc1SyM27xQ==",
|
| 767 |
+
"cpu": [
|
| 768 |
+
"x64"
|
| 769 |
+
],
|
| 770 |
+
"dev": true,
|
| 771 |
+
"license": "MIT",
|
| 772 |
+
"optional": true,
|
| 773 |
+
"os": [
|
| 774 |
+
"win32"
|
| 775 |
+
]
|
| 776 |
+
},
|
| 777 |
+
"node_modules/@sinclair/typebox": {
|
| 778 |
+
"version": "0.27.10",
|
| 779 |
+
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.10.tgz",
|
| 780 |
+
"integrity": "sha512-MTBk/3jGLNB2tVxv6uLlFh1iu64iYOQ2PbdOSK3NW8JZsmlaOh2q6sdtKowBhfw8QFLmYNzTW4/oK4uATIi6ZA==",
|
| 781 |
+
"dev": true,
|
| 782 |
+
"license": "MIT"
|
| 783 |
+
},
|
| 784 |
+
"node_modules/@types/estree": {
|
| 785 |
+
"version": "1.0.8",
|
| 786 |
+
"resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
|
| 787 |
+
"integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==",
|
| 788 |
+
"dev": true,
|
| 789 |
+
"license": "MIT"
|
| 790 |
+
},
|
| 791 |
+
"node_modules/@types/node": {
|
| 792 |
+
"version": "20.19.39",
|
| 793 |
+
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.39.tgz",
|
| 794 |
+
"integrity": "sha512-orrrD74MBUyK8jOAD/r0+lfa1I2MO6I+vAkmAWzMYbCcgrN4lCrmK52gRFQq/JRxfYPfonkr4b0jcY7Olqdqbw==",
|
| 795 |
+
"dev": true,
|
| 796 |
+
"license": "MIT",
|
| 797 |
+
"dependencies": {
|
| 798 |
+
"undici-types": "~6.21.0"
|
| 799 |
+
}
|
| 800 |
+
},
|
| 801 |
+
"node_modules/@vitest/expect": {
|
| 802 |
+
"version": "1.6.1",
|
| 803 |
+
"resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-1.6.1.tgz",
|
| 804 |
+
"integrity": "sha512-jXL+9+ZNIJKruofqXuuTClf44eSpcHlgj3CiuNihUF3Ioujtmc0zIa3UJOW5RjDK1YLBJZnWBlPuqhYycLioog==",
|
| 805 |
+
"dev": true,
|
| 806 |
+
"license": "MIT",
|
| 807 |
+
"dependencies": {
|
| 808 |
+
"@vitest/spy": "1.6.1",
|
| 809 |
+
"@vitest/utils": "1.6.1",
|
| 810 |
+
"chai": "^4.3.10"
|
| 811 |
+
},
|
| 812 |
+
"funding": {
|
| 813 |
+
"url": "https://opencollective.com/vitest"
|
| 814 |
+
}
|
| 815 |
+
},
|
| 816 |
+
"node_modules/@vitest/runner": {
|
| 817 |
+
"version": "1.6.1",
|
| 818 |
+
"resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-1.6.1.tgz",
|
| 819 |
+
"integrity": "sha512-3nSnYXkVkf3mXFfE7vVyPmi3Sazhb/2cfZGGs0JRzFsPFvAMBEcrweV1V1GsrstdXeKCTXlJbvnQwGWgEIHmOA==",
|
| 820 |
+
"dev": true,
|
| 821 |
+
"license": "MIT",
|
| 822 |
+
"dependencies": {
|
| 823 |
+
"@vitest/utils": "1.6.1",
|
| 824 |
+
"p-limit": "^5.0.0",
|
| 825 |
+
"pathe": "^1.1.1"
|
| 826 |
+
},
|
| 827 |
+
"funding": {
|
| 828 |
+
"url": "https://opencollective.com/vitest"
|
| 829 |
+
}
|
| 830 |
+
},
|
| 831 |
+
"node_modules/@vitest/snapshot": {
|
| 832 |
+
"version": "1.6.1",
|
| 833 |
+
"resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-1.6.1.tgz",
|
| 834 |
+
"integrity": "sha512-WvidQuWAzU2p95u8GAKlRMqMyN1yOJkGHnx3M1PL9Raf7AQ1kwLKg04ADlCa3+OXUZE7BceOhVZiuWAbzCKcUQ==",
|
| 835 |
+
"dev": true,
|
| 836 |
+
"license": "MIT",
|
| 837 |
+
"dependencies": {
|
| 838 |
+
"magic-string": "^0.30.5",
|
| 839 |
+
"pathe": "^1.1.1",
|
| 840 |
+
"pretty-format": "^29.7.0"
|
| 841 |
+
},
|
| 842 |
+
"funding": {
|
| 843 |
+
"url": "https://opencollective.com/vitest"
|
| 844 |
+
}
|
| 845 |
+
},
|
| 846 |
+
"node_modules/@vitest/spy": {
|
| 847 |
+
"version": "1.6.1",
|
| 848 |
+
"resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-1.6.1.tgz",
|
| 849 |
+
"integrity": "sha512-MGcMmpGkZebsMZhbQKkAf9CX5zGvjkBTqf8Zx3ApYWXr3wG+QvEu2eXWfnIIWYSJExIp4V9FCKDEeygzkYrXMw==",
|
| 850 |
+
"dev": true,
|
| 851 |
+
"license": "MIT",
|
| 852 |
+
"dependencies": {
|
| 853 |
+
"tinyspy": "^2.2.0"
|
| 854 |
+
},
|
| 855 |
+
"funding": {
|
| 856 |
+
"url": "https://opencollective.com/vitest"
|
| 857 |
+
}
|
| 858 |
+
},
|
| 859 |
+
"node_modules/@vitest/utils": {
|
| 860 |
+
"version": "1.6.1",
|
| 861 |
+
"resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-1.6.1.tgz",
|
| 862 |
+
"integrity": "sha512-jOrrUvXM4Av9ZWiG1EajNto0u96kWAhJ1LmPmJhXXQx/32MecEKd10pOLYgS2BQx1TgkGhloPU1ArDW2vvaY6g==",
|
| 863 |
+
"dev": true,
|
| 864 |
+
"license": "MIT",
|
| 865 |
+
"dependencies": {
|
| 866 |
+
"diff-sequences": "^29.6.3",
|
| 867 |
+
"estree-walker": "^3.0.3",
|
| 868 |
+
"loupe": "^2.3.7",
|
| 869 |
+
"pretty-format": "^29.7.0"
|
| 870 |
+
},
|
| 871 |
+
"funding": {
|
| 872 |
+
"url": "https://opencollective.com/vitest"
|
| 873 |
+
}
|
| 874 |
+
},
|
| 875 |
+
"node_modules/acorn": {
|
| 876 |
+
"version": "8.16.0",
|
| 877 |
+
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz",
|
| 878 |
+
"integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==",
|
| 879 |
+
"dev": true,
|
| 880 |
+
"license": "MIT",
|
| 881 |
+
"bin": {
|
| 882 |
+
"acorn": "bin/acorn"
|
| 883 |
+
},
|
| 884 |
+
"engines": {
|
| 885 |
+
"node": ">=0.4.0"
|
| 886 |
+
}
|
| 887 |
+
},
|
| 888 |
+
"node_modules/acorn-walk": {
|
| 889 |
+
"version": "8.3.5",
|
| 890 |
+
"resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.5.tgz",
|
| 891 |
+
"integrity": "sha512-HEHNfbars9v4pgpW6SO1KSPkfoS0xVOM/9UzkJltjlsHZmJasxg8aXkuZa7SMf8vKGIBhpUsPluQSqhJFCqebw==",
|
| 892 |
+
"dev": true,
|
| 893 |
+
"license": "MIT",
|
| 894 |
+
"dependencies": {
|
| 895 |
+
"acorn": "^8.11.0"
|
| 896 |
+
},
|
| 897 |
+
"engines": {
|
| 898 |
+
"node": ">=0.4.0"
|
| 899 |
+
}
|
| 900 |
+
},
|
| 901 |
+
"node_modules/ansi-styles": {
|
| 902 |
+
"version": "5.2.0",
|
| 903 |
+
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
|
| 904 |
+
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
|
| 905 |
+
"dev": true,
|
| 906 |
+
"license": "MIT",
|
| 907 |
+
"engines": {
|
| 908 |
+
"node": ">=10"
|
| 909 |
+
},
|
| 910 |
+
"funding": {
|
| 911 |
+
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
|
| 912 |
+
}
|
| 913 |
+
},
|
| 914 |
+
"node_modules/assertion-error": {
|
| 915 |
+
"version": "1.1.0",
|
| 916 |
+
"resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz",
|
| 917 |
+
"integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==",
|
| 918 |
+
"dev": true,
|
| 919 |
+
"license": "MIT",
|
| 920 |
+
"engines": {
|
| 921 |
+
"node": "*"
|
| 922 |
+
}
|
| 923 |
+
},
|
| 924 |
+
"node_modules/cac": {
|
| 925 |
+
"version": "6.7.14",
|
| 926 |
+
"resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz",
|
| 927 |
+
"integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==",
|
| 928 |
+
"dev": true,
|
| 929 |
+
"license": "MIT",
|
| 930 |
+
"engines": {
|
| 931 |
+
"node": ">=8"
|
| 932 |
+
}
|
| 933 |
+
},
|
| 934 |
+
"node_modules/chai": {
|
| 935 |
+
"version": "4.5.0",
|
| 936 |
+
"resolved": "https://registry.npmjs.org/chai/-/chai-4.5.0.tgz",
|
| 937 |
+
"integrity": "sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw==",
|
| 938 |
+
"dev": true,
|
| 939 |
+
"license": "MIT",
|
| 940 |
+
"dependencies": {
|
| 941 |
+
"assertion-error": "^1.1.0",
|
| 942 |
+
"check-error": "^1.0.3",
|
| 943 |
+
"deep-eql": "^4.1.3",
|
| 944 |
+
"get-func-name": "^2.0.2",
|
| 945 |
+
"loupe": "^2.3.6",
|
| 946 |
+
"pathval": "^1.1.1",
|
| 947 |
+
"type-detect": "^4.1.0"
|
| 948 |
+
},
|
| 949 |
+
"engines": {
|
| 950 |
+
"node": ">=4"
|
| 951 |
+
}
|
| 952 |
+
},
|
| 953 |
+
"node_modules/check-error": {
|
| 954 |
+
"version": "1.0.3",
|
| 955 |
+
"resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.3.tgz",
|
| 956 |
+
"integrity": "sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==",
|
| 957 |
+
"dev": true,
|
| 958 |
+
"license": "MIT",
|
| 959 |
+
"dependencies": {
|
| 960 |
+
"get-func-name": "^2.0.2"
|
| 961 |
+
},
|
| 962 |
+
"engines": {
|
| 963 |
+
"node": "*"
|
| 964 |
+
}
|
| 965 |
+
},
|
| 966 |
+
"node_modules/confbox": {
|
| 967 |
+
"version": "0.1.8",
|
| 968 |
+
"resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.8.tgz",
|
| 969 |
+
"integrity": "sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==",
|
| 970 |
+
"dev": true,
|
| 971 |
+
"license": "MIT"
|
| 972 |
+
},
|
| 973 |
+
"node_modules/cross-spawn": {
|
| 974 |
+
"version": "7.0.6",
|
| 975 |
+
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
|
| 976 |
+
"integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
|
| 977 |
+
"dev": true,
|
| 978 |
+
"license": "MIT",
|
| 979 |
+
"dependencies": {
|
| 980 |
+
"path-key": "^3.1.0",
|
| 981 |
+
"shebang-command": "^2.0.0",
|
| 982 |
+
"which": "^2.0.1"
|
| 983 |
+
},
|
| 984 |
+
"engines": {
|
| 985 |
+
"node": ">= 8"
|
| 986 |
+
}
|
| 987 |
+
},
|
| 988 |
+
"node_modules/debug": {
|
| 989 |
+
"version": "4.4.3",
|
| 990 |
+
"resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
|
| 991 |
+
"integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
|
| 992 |
+
"dev": true,
|
| 993 |
+
"license": "MIT",
|
| 994 |
+
"dependencies": {
|
| 995 |
+
"ms": "^2.1.3"
|
| 996 |
+
},
|
| 997 |
+
"engines": {
|
| 998 |
+
"node": ">=6.0"
|
| 999 |
+
},
|
| 1000 |
+
"peerDependenciesMeta": {
|
| 1001 |
+
"supports-color": {
|
| 1002 |
+
"optional": true
|
| 1003 |
+
}
|
| 1004 |
+
}
|
| 1005 |
+
},
|
| 1006 |
+
"node_modules/deep-eql": {
|
| 1007 |
+
"version": "4.1.4",
|
| 1008 |
+
"resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-4.1.4.tgz",
|
| 1009 |
+
"integrity": "sha512-SUwdGfqdKOwxCPeVYjwSyRpJ7Z+fhpwIAtmCUdZIWZ/YP5R9WAsyuSgpLVDi9bjWoN2LXHNss/dk3urXtdQxGg==",
|
| 1010 |
+
"dev": true,
|
| 1011 |
+
"license": "MIT",
|
| 1012 |
+
"dependencies": {
|
| 1013 |
+
"type-detect": "^4.0.0"
|
| 1014 |
+
},
|
| 1015 |
+
"engines": {
|
| 1016 |
+
"node": ">=6"
|
| 1017 |
+
}
|
| 1018 |
+
},
|
| 1019 |
+
"node_modules/diff-sequences": {
|
| 1020 |
+
"version": "29.6.3",
|
| 1021 |
+
"resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz",
|
| 1022 |
+
"integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==",
|
| 1023 |
+
"dev": true,
|
| 1024 |
+
"license": "MIT",
|
| 1025 |
+
"engines": {
|
| 1026 |
+
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
|
| 1027 |
+
}
|
| 1028 |
+
},
|
| 1029 |
+
"node_modules/esbuild": {
|
| 1030 |
+
"version": "0.21.5",
|
| 1031 |
+
"resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz",
|
| 1032 |
+
"integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==",
|
| 1033 |
+
"dev": true,
|
| 1034 |
+
"hasInstallScript": true,
|
| 1035 |
+
"license": "MIT",
|
| 1036 |
+
"bin": {
|
| 1037 |
+
"esbuild": "bin/esbuild"
|
| 1038 |
+
},
|
| 1039 |
+
"engines": {
|
| 1040 |
+
"node": ">=12"
|
| 1041 |
+
},
|
| 1042 |
+
"optionalDependencies": {
|
| 1043 |
+
"@esbuild/aix-ppc64": "0.21.5",
|
| 1044 |
+
"@esbuild/android-arm": "0.21.5",
|
| 1045 |
+
"@esbuild/android-arm64": "0.21.5",
|
| 1046 |
+
"@esbuild/android-x64": "0.21.5",
|
| 1047 |
+
"@esbuild/darwin-arm64": "0.21.5",
|
| 1048 |
+
"@esbuild/darwin-x64": "0.21.5",
|
| 1049 |
+
"@esbuild/freebsd-arm64": "0.21.5",
|
| 1050 |
+
"@esbuild/freebsd-x64": "0.21.5",
|
| 1051 |
+
"@esbuild/linux-arm": "0.21.5",
|
| 1052 |
+
"@esbuild/linux-arm64": "0.21.5",
|
| 1053 |
+
"@esbuild/linux-ia32": "0.21.5",
|
| 1054 |
+
"@esbuild/linux-loong64": "0.21.5",
|
| 1055 |
+
"@esbuild/linux-mips64el": "0.21.5",
|
| 1056 |
+
"@esbuild/linux-ppc64": "0.21.5",
|
| 1057 |
+
"@esbuild/linux-riscv64": "0.21.5",
|
| 1058 |
+
"@esbuild/linux-s390x": "0.21.5",
|
| 1059 |
+
"@esbuild/linux-x64": "0.21.5",
|
| 1060 |
+
"@esbuild/netbsd-x64": "0.21.5",
|
| 1061 |
+
"@esbuild/openbsd-x64": "0.21.5",
|
| 1062 |
+
"@esbuild/sunos-x64": "0.21.5",
|
| 1063 |
+
"@esbuild/win32-arm64": "0.21.5",
|
| 1064 |
+
"@esbuild/win32-ia32": "0.21.5",
|
| 1065 |
+
"@esbuild/win32-x64": "0.21.5"
|
| 1066 |
+
}
|
| 1067 |
+
},
|
| 1068 |
+
"node_modules/estree-walker": {
|
| 1069 |
+
"version": "3.0.3",
|
| 1070 |
+
"resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz",
|
| 1071 |
+
"integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==",
|
| 1072 |
+
"dev": true,
|
| 1073 |
+
"license": "MIT",
|
| 1074 |
+
"dependencies": {
|
| 1075 |
+
"@types/estree": "^1.0.0"
|
| 1076 |
+
}
|
| 1077 |
+
},
|
| 1078 |
+
"node_modules/execa": {
|
| 1079 |
+
"version": "8.0.1",
|
| 1080 |
+
"resolved": "https://registry.npmjs.org/execa/-/execa-8.0.1.tgz",
|
| 1081 |
+
"integrity": "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==",
|
| 1082 |
+
"dev": true,
|
| 1083 |
+
"license": "MIT",
|
| 1084 |
+
"dependencies": {
|
| 1085 |
+
"cross-spawn": "^7.0.3",
|
| 1086 |
+
"get-stream": "^8.0.1",
|
| 1087 |
+
"human-signals": "^5.0.0",
|
| 1088 |
+
"is-stream": "^3.0.0",
|
| 1089 |
+
"merge-stream": "^2.0.0",
|
| 1090 |
+
"npm-run-path": "^5.1.0",
|
| 1091 |
+
"onetime": "^6.0.0",
|
| 1092 |
+
"signal-exit": "^4.1.0",
|
| 1093 |
+
"strip-final-newline": "^3.0.0"
|
| 1094 |
+
},
|
| 1095 |
+
"engines": {
|
| 1096 |
+
"node": ">=16.17"
|
| 1097 |
+
},
|
| 1098 |
+
"funding": {
|
| 1099 |
+
"url": "https://github.com/sindresorhus/execa?sponsor=1"
|
| 1100 |
+
}
|
| 1101 |
+
},
|
| 1102 |
+
"node_modules/fsevents": {
|
| 1103 |
+
"version": "2.3.3",
|
| 1104 |
+
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
|
| 1105 |
+
"integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
|
| 1106 |
+
"dev": true,
|
| 1107 |
+
"hasInstallScript": true,
|
| 1108 |
+
"license": "MIT",
|
| 1109 |
+
"optional": true,
|
| 1110 |
+
"os": [
|
| 1111 |
+
"darwin"
|
| 1112 |
+
],
|
| 1113 |
+
"engines": {
|
| 1114 |
+
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
|
| 1115 |
+
}
|
| 1116 |
+
},
|
| 1117 |
+
"node_modules/get-func-name": {
|
| 1118 |
+
"version": "2.0.2",
|
| 1119 |
+
"resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.2.tgz",
|
| 1120 |
+
"integrity": "sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==",
|
| 1121 |
+
"dev": true,
|
| 1122 |
+
"license": "MIT",
|
| 1123 |
+
"engines": {
|
| 1124 |
+
"node": "*"
|
| 1125 |
+
}
|
| 1126 |
+
},
|
| 1127 |
+
"node_modules/get-stream": {
|
| 1128 |
+
"version": "8.0.1",
|
| 1129 |
+
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz",
|
| 1130 |
+
"integrity": "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==",
|
| 1131 |
+
"dev": true,
|
| 1132 |
+
"license": "MIT",
|
| 1133 |
+
"engines": {
|
| 1134 |
+
"node": ">=16"
|
| 1135 |
+
},
|
| 1136 |
+
"funding": {
|
| 1137 |
+
"url": "https://github.com/sponsors/sindresorhus"
|
| 1138 |
+
}
|
| 1139 |
+
},
|
| 1140 |
+
"node_modules/human-signals": {
|
| 1141 |
+
"version": "5.0.0",
|
| 1142 |
+
"resolved": "https://registry.npmjs.org/human-signals/-/human-signals-5.0.0.tgz",
|
| 1143 |
+
"integrity": "sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==",
|
| 1144 |
+
"dev": true,
|
| 1145 |
+
"license": "Apache-2.0",
|
| 1146 |
+
"engines": {
|
| 1147 |
+
"node": ">=16.17.0"
|
| 1148 |
+
}
|
| 1149 |
+
},
|
| 1150 |
+
"node_modules/is-stream": {
|
| 1151 |
+
"version": "3.0.0",
|
| 1152 |
+
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz",
|
| 1153 |
+
"integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==",
|
| 1154 |
+
"dev": true,
|
| 1155 |
+
"license": "MIT",
|
| 1156 |
+
"engines": {
|
| 1157 |
+
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
|
| 1158 |
+
},
|
| 1159 |
+
"funding": {
|
| 1160 |
+
"url": "https://github.com/sponsors/sindresorhus"
|
| 1161 |
+
}
|
| 1162 |
+
},
|
| 1163 |
+
"node_modules/isexe": {
|
| 1164 |
+
"version": "2.0.0",
|
| 1165 |
+
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
|
| 1166 |
+
"integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
|
| 1167 |
+
"dev": true,
|
| 1168 |
+
"license": "ISC"
|
| 1169 |
+
},
|
| 1170 |
+
"node_modules/js-tokens": {
|
| 1171 |
+
"version": "9.0.1",
|
| 1172 |
+
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz",
|
| 1173 |
+
"integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==",
|
| 1174 |
+
"dev": true,
|
| 1175 |
+
"license": "MIT"
|
| 1176 |
+
},
|
| 1177 |
+
"node_modules/local-pkg": {
|
| 1178 |
+
"version": "0.5.1",
|
| 1179 |
+
"resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.5.1.tgz",
|
| 1180 |
+
"integrity": "sha512-9rrA30MRRP3gBD3HTGnC6cDFpaE1kVDWxWgqWJUN0RvDNAo+Nz/9GxB+nHOH0ifbVFy0hSA1V6vFDvnx54lTEQ==",
|
| 1181 |
+
"dev": true,
|
| 1182 |
+
"license": "MIT",
|
| 1183 |
+
"dependencies": {
|
| 1184 |
+
"mlly": "^1.7.3",
|
| 1185 |
+
"pkg-types": "^1.2.1"
|
| 1186 |
+
},
|
| 1187 |
+
"engines": {
|
| 1188 |
+
"node": ">=14"
|
| 1189 |
+
},
|
| 1190 |
+
"funding": {
|
| 1191 |
+
"url": "https://github.com/sponsors/antfu"
|
| 1192 |
+
}
|
| 1193 |
+
},
|
| 1194 |
+
"node_modules/loupe": {
|
| 1195 |
+
"version": "2.3.7",
|
| 1196 |
+
"resolved": "https://registry.npmjs.org/loupe/-/loupe-2.3.7.tgz",
|
| 1197 |
+
"integrity": "sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==",
|
| 1198 |
+
"dev": true,
|
| 1199 |
+
"license": "MIT",
|
| 1200 |
+
"dependencies": {
|
| 1201 |
+
"get-func-name": "^2.0.1"
|
| 1202 |
+
}
|
| 1203 |
+
},
|
| 1204 |
+
"node_modules/magic-string": {
|
| 1205 |
+
"version": "0.30.21",
|
| 1206 |
+
"resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz",
|
| 1207 |
+
"integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==",
|
| 1208 |
+
"dev": true,
|
| 1209 |
+
"license": "MIT",
|
| 1210 |
+
"dependencies": {
|
| 1211 |
+
"@jridgewell/sourcemap-codec": "^1.5.5"
|
| 1212 |
+
}
|
| 1213 |
+
},
|
| 1214 |
+
"node_modules/merge-stream": {
|
| 1215 |
+
"version": "2.0.0",
|
| 1216 |
+
"resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
|
| 1217 |
+
"integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==",
|
| 1218 |
+
"dev": true,
|
| 1219 |
+
"license": "MIT"
|
| 1220 |
+
},
|
| 1221 |
+
"node_modules/mimic-fn": {
|
| 1222 |
+
"version": "4.0.0",
|
| 1223 |
+
"resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz",
|
| 1224 |
+
"integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==",
|
| 1225 |
+
"dev": true,
|
| 1226 |
+
"license": "MIT",
|
| 1227 |
+
"engines": {
|
| 1228 |
+
"node": ">=12"
|
| 1229 |
+
},
|
| 1230 |
+
"funding": {
|
| 1231 |
+
"url": "https://github.com/sponsors/sindresorhus"
|
| 1232 |
+
}
|
| 1233 |
+
},
|
| 1234 |
+
"node_modules/mlly": {
|
| 1235 |
+
"version": "1.8.2",
|
| 1236 |
+
"resolved": "https://registry.npmjs.org/mlly/-/mlly-1.8.2.tgz",
|
| 1237 |
+
"integrity": "sha512-d+ObxMQFmbt10sretNDytwt85VrbkhhUA/JBGm1MPaWJ65Cl4wOgLaB1NYvJSZ0Ef03MMEU/0xpPMXUIQ29UfA==",
|
| 1238 |
+
"dev": true,
|
| 1239 |
+
"license": "MIT",
|
| 1240 |
+
"dependencies": {
|
| 1241 |
+
"acorn": "^8.16.0",
|
| 1242 |
+
"pathe": "^2.0.3",
|
| 1243 |
+
"pkg-types": "^1.3.1",
|
| 1244 |
+
"ufo": "^1.6.3"
|
| 1245 |
+
}
|
| 1246 |
+
},
|
| 1247 |
+
"node_modules/mlly/node_modules/pathe": {
|
| 1248 |
+
"version": "2.0.3",
|
| 1249 |
+
"resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz",
|
| 1250 |
+
"integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==",
|
| 1251 |
+
"dev": true,
|
| 1252 |
+
"license": "MIT"
|
| 1253 |
+
},
|
| 1254 |
+
"node_modules/ms": {
|
| 1255 |
+
"version": "2.1.3",
|
| 1256 |
+
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
| 1257 |
+
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
|
| 1258 |
+
"dev": true,
|
| 1259 |
+
"license": "MIT"
|
| 1260 |
+
},
|
| 1261 |
+
"node_modules/nanoid": {
|
| 1262 |
+
"version": "3.3.11",
|
| 1263 |
+
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
|
| 1264 |
+
"integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
|
| 1265 |
+
"dev": true,
|
| 1266 |
+
"funding": [
|
| 1267 |
+
{
|
| 1268 |
+
"type": "github",
|
| 1269 |
+
"url": "https://github.com/sponsors/ai"
|
| 1270 |
+
}
|
| 1271 |
+
],
|
| 1272 |
+
"license": "MIT",
|
| 1273 |
+
"bin": {
|
| 1274 |
+
"nanoid": "bin/nanoid.cjs"
|
| 1275 |
+
},
|
| 1276 |
+
"engines": {
|
| 1277 |
+
"node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
|
| 1278 |
+
}
|
| 1279 |
+
},
|
| 1280 |
+
"node_modules/npm-run-path": {
|
| 1281 |
+
"version": "5.3.0",
|
| 1282 |
+
"resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz",
|
| 1283 |
+
"integrity": "sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==",
|
| 1284 |
+
"dev": true,
|
| 1285 |
+
"license": "MIT",
|
| 1286 |
+
"dependencies": {
|
| 1287 |
+
"path-key": "^4.0.0"
|
| 1288 |
+
},
|
| 1289 |
+
"engines": {
|
| 1290 |
+
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
|
| 1291 |
+
},
|
| 1292 |
+
"funding": {
|
| 1293 |
+
"url": "https://github.com/sponsors/sindresorhus"
|
| 1294 |
+
}
|
| 1295 |
+
},
|
| 1296 |
+
"node_modules/npm-run-path/node_modules/path-key": {
|
| 1297 |
+
"version": "4.0.0",
|
| 1298 |
+
"resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz",
|
| 1299 |
+
"integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==",
|
| 1300 |
+
"dev": true,
|
| 1301 |
+
"license": "MIT",
|
| 1302 |
+
"engines": {
|
| 1303 |
+
"node": ">=12"
|
| 1304 |
+
},
|
| 1305 |
+
"funding": {
|
| 1306 |
+
"url": "https://github.com/sponsors/sindresorhus"
|
| 1307 |
+
}
|
| 1308 |
+
},
|
| 1309 |
+
"node_modules/onetime": {
|
| 1310 |
+
"version": "6.0.0",
|
| 1311 |
+
"resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz",
|
| 1312 |
+
"integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==",
|
| 1313 |
+
"dev": true,
|
| 1314 |
+
"license": "MIT",
|
| 1315 |
+
"dependencies": {
|
| 1316 |
+
"mimic-fn": "^4.0.0"
|
| 1317 |
+
},
|
| 1318 |
+
"engines": {
|
| 1319 |
+
"node": ">=12"
|
| 1320 |
+
},
|
| 1321 |
+
"funding": {
|
| 1322 |
+
"url": "https://github.com/sponsors/sindresorhus"
|
| 1323 |
+
}
|
| 1324 |
+
},
|
| 1325 |
+
"node_modules/p-limit": {
|
| 1326 |
+
"version": "5.0.0",
|
| 1327 |
+
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-5.0.0.tgz",
|
| 1328 |
+
"integrity": "sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==",
|
| 1329 |
+
"dev": true,
|
| 1330 |
+
"license": "MIT",
|
| 1331 |
+
"dependencies": {
|
| 1332 |
+
"yocto-queue": "^1.0.0"
|
| 1333 |
+
},
|
| 1334 |
+
"engines": {
|
| 1335 |
+
"node": ">=18"
|
| 1336 |
+
},
|
| 1337 |
+
"funding": {
|
| 1338 |
+
"url": "https://github.com/sponsors/sindresorhus"
|
| 1339 |
+
}
|
| 1340 |
+
},
|
| 1341 |
+
"node_modules/path-key": {
|
| 1342 |
+
"version": "3.1.1",
|
| 1343 |
+
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
|
| 1344 |
+
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
|
| 1345 |
+
"dev": true,
|
| 1346 |
+
"license": "MIT",
|
| 1347 |
+
"engines": {
|
| 1348 |
+
"node": ">=8"
|
| 1349 |
+
}
|
| 1350 |
+
},
|
| 1351 |
+
"node_modules/pathe": {
|
| 1352 |
+
"version": "1.1.2",
|
| 1353 |
+
"resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz",
|
| 1354 |
+
"integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==",
|
| 1355 |
+
"dev": true,
|
| 1356 |
+
"license": "MIT"
|
| 1357 |
+
},
|
| 1358 |
+
"node_modules/pathval": {
|
| 1359 |
+
"version": "1.1.1",
|
| 1360 |
+
"resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz",
|
| 1361 |
+
"integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==",
|
| 1362 |
+
"dev": true,
|
| 1363 |
+
"license": "MIT",
|
| 1364 |
+
"engines": {
|
| 1365 |
+
"node": "*"
|
| 1366 |
+
}
|
| 1367 |
+
},
|
| 1368 |
+
"node_modules/picocolors": {
|
| 1369 |
+
"version": "1.1.1",
|
| 1370 |
+
"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
|
| 1371 |
+
"integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
|
| 1372 |
+
"dev": true,
|
| 1373 |
+
"license": "ISC"
|
| 1374 |
+
},
|
| 1375 |
+
"node_modules/pkg-types": {
|
| 1376 |
+
"version": "1.3.1",
|
| 1377 |
+
"resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.3.1.tgz",
|
| 1378 |
+
"integrity": "sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==",
|
| 1379 |
+
"dev": true,
|
| 1380 |
+
"license": "MIT",
|
| 1381 |
+
"dependencies": {
|
| 1382 |
+
"confbox": "^0.1.8",
|
| 1383 |
+
"mlly": "^1.7.4",
|
| 1384 |
+
"pathe": "^2.0.1"
|
| 1385 |
+
}
|
| 1386 |
+
},
|
| 1387 |
+
"node_modules/pkg-types/node_modules/pathe": {
|
| 1388 |
+
"version": "2.0.3",
|
| 1389 |
+
"resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz",
|
| 1390 |
+
"integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==",
|
| 1391 |
+
"dev": true,
|
| 1392 |
+
"license": "MIT"
|
| 1393 |
+
},
|
| 1394 |
+
"node_modules/postcss": {
|
| 1395 |
+
"version": "8.5.8",
|
| 1396 |
+
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.8.tgz",
|
| 1397 |
+
"integrity": "sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==",
|
| 1398 |
+
"dev": true,
|
| 1399 |
+
"funding": [
|
| 1400 |
+
{
|
| 1401 |
+
"type": "opencollective",
|
| 1402 |
+
"url": "https://opencollective.com/postcss/"
|
| 1403 |
+
},
|
| 1404 |
+
{
|
| 1405 |
+
"type": "tidelift",
|
| 1406 |
+
"url": "https://tidelift.com/funding/github/npm/postcss"
|
| 1407 |
+
},
|
| 1408 |
+
{
|
| 1409 |
+
"type": "github",
|
| 1410 |
+
"url": "https://github.com/sponsors/ai"
|
| 1411 |
+
}
|
| 1412 |
+
],
|
| 1413 |
+
"license": "MIT",
|
| 1414 |
+
"dependencies": {
|
| 1415 |
+
"nanoid": "^3.3.11",
|
| 1416 |
+
"picocolors": "^1.1.1",
|
| 1417 |
+
"source-map-js": "^1.2.1"
|
| 1418 |
+
},
|
| 1419 |
+
"engines": {
|
| 1420 |
+
"node": "^10 || ^12 || >=14"
|
| 1421 |
+
}
|
| 1422 |
+
},
|
| 1423 |
+
"node_modules/pretty-format": {
|
| 1424 |
+
"version": "29.7.0",
|
| 1425 |
+
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz",
|
| 1426 |
+
"integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==",
|
| 1427 |
+
"dev": true,
|
| 1428 |
+
"license": "MIT",
|
| 1429 |
+
"dependencies": {
|
| 1430 |
+
"@jest/schemas": "^29.6.3",
|
| 1431 |
+
"ansi-styles": "^5.0.0",
|
| 1432 |
+
"react-is": "^18.0.0"
|
| 1433 |
+
},
|
| 1434 |
+
"engines": {
|
| 1435 |
+
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
|
| 1436 |
+
}
|
| 1437 |
+
},
|
| 1438 |
+
"node_modules/react-is": {
|
| 1439 |
+
"version": "18.3.1",
|
| 1440 |
+
"resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz",
|
| 1441 |
+
"integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==",
|
| 1442 |
+
"dev": true,
|
| 1443 |
+
"license": "MIT"
|
| 1444 |
+
},
|
| 1445 |
+
"node_modules/rollup": {
|
| 1446 |
+
"version": "4.60.1",
|
| 1447 |
+
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.1.tgz",
|
| 1448 |
+
"integrity": "sha512-VmtB2rFU/GroZ4oL8+ZqXgSA38O6GR8KSIvWmEFv63pQ0G6KaBH9s07PO8XTXP4vI+3UJUEypOfjkGfmSBBR0w==",
|
| 1449 |
+
"dev": true,
|
| 1450 |
+
"license": "MIT",
|
| 1451 |
+
"dependencies": {
|
| 1452 |
+
"@types/estree": "1.0.8"
|
| 1453 |
+
},
|
| 1454 |
+
"bin": {
|
| 1455 |
+
"rollup": "dist/bin/rollup"
|
| 1456 |
+
},
|
| 1457 |
+
"engines": {
|
| 1458 |
+
"node": ">=18.0.0",
|
| 1459 |
+
"npm": ">=8.0.0"
|
| 1460 |
+
},
|
| 1461 |
+
"optionalDependencies": {
|
| 1462 |
+
"@rollup/rollup-android-arm-eabi": "4.60.1",
|
| 1463 |
+
"@rollup/rollup-android-arm64": "4.60.1",
|
| 1464 |
+
"@rollup/rollup-darwin-arm64": "4.60.1",
|
| 1465 |
+
"@rollup/rollup-darwin-x64": "4.60.1",
|
| 1466 |
+
"@rollup/rollup-freebsd-arm64": "4.60.1",
|
| 1467 |
+
"@rollup/rollup-freebsd-x64": "4.60.1",
|
| 1468 |
+
"@rollup/rollup-linux-arm-gnueabihf": "4.60.1",
|
| 1469 |
+
"@rollup/rollup-linux-arm-musleabihf": "4.60.1",
|
| 1470 |
+
"@rollup/rollup-linux-arm64-gnu": "4.60.1",
|
| 1471 |
+
"@rollup/rollup-linux-arm64-musl": "4.60.1",
|
| 1472 |
+
"@rollup/rollup-linux-loong64-gnu": "4.60.1",
|
| 1473 |
+
"@rollup/rollup-linux-loong64-musl": "4.60.1",
|
| 1474 |
+
"@rollup/rollup-linux-ppc64-gnu": "4.60.1",
|
| 1475 |
+
"@rollup/rollup-linux-ppc64-musl": "4.60.1",
|
| 1476 |
+
"@rollup/rollup-linux-riscv64-gnu": "4.60.1",
|
| 1477 |
+
"@rollup/rollup-linux-riscv64-musl": "4.60.1",
|
| 1478 |
+
"@rollup/rollup-linux-s390x-gnu": "4.60.1",
|
| 1479 |
+
"@rollup/rollup-linux-x64-gnu": "4.60.1",
|
| 1480 |
+
"@rollup/rollup-linux-x64-musl": "4.60.1",
|
| 1481 |
+
"@rollup/rollup-openbsd-x64": "4.60.1",
|
| 1482 |
+
"@rollup/rollup-openharmony-arm64": "4.60.1",
|
| 1483 |
+
"@rollup/rollup-win32-arm64-msvc": "4.60.1",
|
| 1484 |
+
"@rollup/rollup-win32-ia32-msvc": "4.60.1",
|
| 1485 |
+
"@rollup/rollup-win32-x64-gnu": "4.60.1",
|
| 1486 |
+
"@rollup/rollup-win32-x64-msvc": "4.60.1",
|
| 1487 |
+
"fsevents": "~2.3.2"
|
| 1488 |
+
}
|
| 1489 |
+
},
|
| 1490 |
+
"node_modules/shebang-command": {
|
| 1491 |
+
"version": "2.0.0",
|
| 1492 |
+
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
|
| 1493 |
+
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
|
| 1494 |
+
"dev": true,
|
| 1495 |
+
"license": "MIT",
|
| 1496 |
+
"dependencies": {
|
| 1497 |
+
"shebang-regex": "^3.0.0"
|
| 1498 |
+
},
|
| 1499 |
+
"engines": {
|
| 1500 |
+
"node": ">=8"
|
| 1501 |
+
}
|
| 1502 |
+
},
|
| 1503 |
+
"node_modules/shebang-regex": {
|
| 1504 |
+
"version": "3.0.0",
|
| 1505 |
+
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
|
| 1506 |
+
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
|
| 1507 |
+
"dev": true,
|
| 1508 |
+
"license": "MIT",
|
| 1509 |
+
"engines": {
|
| 1510 |
+
"node": ">=8"
|
| 1511 |
+
}
|
| 1512 |
+
},
|
| 1513 |
+
"node_modules/siginfo": {
|
| 1514 |
+
"version": "2.0.0",
|
| 1515 |
+
"resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz",
|
| 1516 |
+
"integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==",
|
| 1517 |
+
"dev": true,
|
| 1518 |
+
"license": "ISC"
|
| 1519 |
+
},
|
| 1520 |
+
"node_modules/signal-exit": {
|
| 1521 |
+
"version": "4.1.0",
|
| 1522 |
+
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
|
| 1523 |
+
"integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
|
| 1524 |
+
"dev": true,
|
| 1525 |
+
"license": "ISC",
|
| 1526 |
+
"engines": {
|
| 1527 |
+
"node": ">=14"
|
| 1528 |
+
},
|
| 1529 |
+
"funding": {
|
| 1530 |
+
"url": "https://github.com/sponsors/isaacs"
|
| 1531 |
+
}
|
| 1532 |
+
},
|
| 1533 |
+
"node_modules/source-map-js": {
|
| 1534 |
+
"version": "1.2.1",
|
| 1535 |
+
"resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
|
| 1536 |
+
"integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
|
| 1537 |
+
"dev": true,
|
| 1538 |
+
"license": "BSD-3-Clause",
|
| 1539 |
+
"engines": {
|
| 1540 |
+
"node": ">=0.10.0"
|
| 1541 |
+
}
|
| 1542 |
+
},
|
| 1543 |
+
"node_modules/stackback": {
|
| 1544 |
+
"version": "0.0.2",
|
| 1545 |
+
"resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz",
|
| 1546 |
+
"integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==",
|
| 1547 |
+
"dev": true,
|
| 1548 |
+
"license": "MIT"
|
| 1549 |
+
},
|
| 1550 |
+
"node_modules/std-env": {
|
| 1551 |
+
"version": "3.10.0",
|
| 1552 |
+
"resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz",
|
| 1553 |
+
"integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==",
|
| 1554 |
+
"dev": true,
|
| 1555 |
+
"license": "MIT"
|
| 1556 |
+
},
|
| 1557 |
+
"node_modules/strip-final-newline": {
|
| 1558 |
+
"version": "3.0.0",
|
| 1559 |
+
"resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz",
|
| 1560 |
+
"integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==",
|
| 1561 |
+
"dev": true,
|
| 1562 |
+
"license": "MIT",
|
| 1563 |
+
"engines": {
|
| 1564 |
+
"node": ">=12"
|
| 1565 |
+
},
|
| 1566 |
+
"funding": {
|
| 1567 |
+
"url": "https://github.com/sponsors/sindresorhus"
|
| 1568 |
+
}
|
| 1569 |
+
},
|
| 1570 |
+
"node_modules/strip-literal": {
|
| 1571 |
+
"version": "2.1.1",
|
| 1572 |
+
"resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-2.1.1.tgz",
|
| 1573 |
+
"integrity": "sha512-631UJ6O00eNGfMiWG78ck80dfBab8X6IVFB51jZK5Icd7XAs60Z5y7QdSd/wGIklnWvRbUNloVzhOKKmutxQ6Q==",
|
| 1574 |
+
"dev": true,
|
| 1575 |
+
"license": "MIT",
|
| 1576 |
+
"dependencies": {
|
| 1577 |
+
"js-tokens": "^9.0.1"
|
| 1578 |
+
},
|
| 1579 |
+
"funding": {
|
| 1580 |
+
"url": "https://github.com/sponsors/antfu"
|
| 1581 |
+
}
|
| 1582 |
+
},
|
| 1583 |
+
"node_modules/tinybench": {
|
| 1584 |
+
"version": "2.9.0",
|
| 1585 |
+
"resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz",
|
| 1586 |
+
"integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==",
|
| 1587 |
+
"dev": true,
|
| 1588 |
+
"license": "MIT"
|
| 1589 |
+
},
|
| 1590 |
+
"node_modules/tinypool": {
|
| 1591 |
+
"version": "0.8.4",
|
| 1592 |
+
"resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.8.4.tgz",
|
| 1593 |
+
"integrity": "sha512-i11VH5gS6IFeLY3gMBQ00/MmLncVP7JLXOw1vlgkytLmJK7QnEr7NXf0LBdxfmNPAeyetukOk0bOYrJrFGjYJQ==",
|
| 1594 |
+
"dev": true,
|
| 1595 |
+
"license": "MIT",
|
| 1596 |
+
"engines": {
|
| 1597 |
+
"node": ">=14.0.0"
|
| 1598 |
+
}
|
| 1599 |
+
},
|
| 1600 |
+
"node_modules/tinyspy": {
|
| 1601 |
+
"version": "2.2.1",
|
| 1602 |
+
"resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-2.2.1.tgz",
|
| 1603 |
+
"integrity": "sha512-KYad6Vy5VDWV4GH3fjpseMQ/XU2BhIYP7Vzd0LG44qRWm/Yt2WCOTicFdvmgo6gWaqooMQCawTtILVQJupKu7A==",
|
| 1604 |
+
"dev": true,
|
| 1605 |
+
"license": "MIT",
|
| 1606 |
+
"engines": {
|
| 1607 |
+
"node": ">=14.0.0"
|
| 1608 |
+
}
|
| 1609 |
+
},
|
| 1610 |
+
"node_modules/type-detect": {
|
| 1611 |
+
"version": "4.1.0",
|
| 1612 |
+
"resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.1.0.tgz",
|
| 1613 |
+
"integrity": "sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw==",
|
| 1614 |
+
"dev": true,
|
| 1615 |
+
"license": "MIT",
|
| 1616 |
+
"engines": {
|
| 1617 |
+
"node": ">=4"
|
| 1618 |
+
}
|
| 1619 |
+
},
|
| 1620 |
+
"node_modules/typescript": {
|
| 1621 |
+
"version": "5.9.3",
|
| 1622 |
+
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz",
|
| 1623 |
+
"integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
|
| 1624 |
+
"dev": true,
|
| 1625 |
+
"license": "Apache-2.0",
|
| 1626 |
+
"bin": {
|
| 1627 |
+
"tsc": "bin/tsc",
|
| 1628 |
+
"tsserver": "bin/tsserver"
|
| 1629 |
+
},
|
| 1630 |
+
"engines": {
|
| 1631 |
+
"node": ">=14.17"
|
| 1632 |
+
}
|
| 1633 |
+
},
|
| 1634 |
+
"node_modules/ufo": {
|
| 1635 |
+
"version": "1.6.3",
|
| 1636 |
+
"resolved": "https://registry.npmjs.org/ufo/-/ufo-1.6.3.tgz",
|
| 1637 |
+
"integrity": "sha512-yDJTmhydvl5lJzBmy/hyOAA0d+aqCBuwl818haVdYCRrWV84o7YyeVm4QlVHStqNrrJSTb6jKuFAVqAFsr+K3Q==",
|
| 1638 |
+
"dev": true,
|
| 1639 |
+
"license": "MIT"
|
| 1640 |
+
},
|
| 1641 |
+
"node_modules/undici-types": {
|
| 1642 |
+
"version": "6.21.0",
|
| 1643 |
+
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz",
|
| 1644 |
+
"integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==",
|
| 1645 |
+
"dev": true,
|
| 1646 |
+
"license": "MIT"
|
| 1647 |
+
},
|
| 1648 |
+
"node_modules/vite": {
|
| 1649 |
+
"version": "5.4.21",
|
| 1650 |
+
"resolved": "https://registry.npmjs.org/vite/-/vite-5.4.21.tgz",
|
| 1651 |
+
"integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==",
|
| 1652 |
+
"dev": true,
|
| 1653 |
+
"license": "MIT",
|
| 1654 |
+
"dependencies": {
|
| 1655 |
+
"esbuild": "^0.21.3",
|
| 1656 |
+
"postcss": "^8.4.43",
|
| 1657 |
+
"rollup": "^4.20.0"
|
| 1658 |
+
},
|
| 1659 |
+
"bin": {
|
| 1660 |
+
"vite": "bin/vite.js"
|
| 1661 |
+
},
|
| 1662 |
+
"engines": {
|
| 1663 |
+
"node": "^18.0.0 || >=20.0.0"
|
| 1664 |
+
},
|
| 1665 |
+
"funding": {
|
| 1666 |
+
"url": "https://github.com/vitejs/vite?sponsor=1"
|
| 1667 |
+
},
|
| 1668 |
+
"optionalDependencies": {
|
| 1669 |
+
"fsevents": "~2.3.3"
|
| 1670 |
+
},
|
| 1671 |
+
"peerDependencies": {
|
| 1672 |
+
"@types/node": "^18.0.0 || >=20.0.0",
|
| 1673 |
+
"less": "*",
|
| 1674 |
+
"lightningcss": "^1.21.0",
|
| 1675 |
+
"sass": "*",
|
| 1676 |
+
"sass-embedded": "*",
|
| 1677 |
+
"stylus": "*",
|
| 1678 |
+
"sugarss": "*",
|
| 1679 |
+
"terser": "^5.4.0"
|
| 1680 |
+
},
|
| 1681 |
+
"peerDependenciesMeta": {
|
| 1682 |
+
"@types/node": {
|
| 1683 |
+
"optional": true
|
| 1684 |
+
},
|
| 1685 |
+
"less": {
|
| 1686 |
+
"optional": true
|
| 1687 |
+
},
|
| 1688 |
+
"lightningcss": {
|
| 1689 |
+
"optional": true
|
| 1690 |
+
},
|
| 1691 |
+
"sass": {
|
| 1692 |
+
"optional": true
|
| 1693 |
+
},
|
| 1694 |
+
"sass-embedded": {
|
| 1695 |
+
"optional": true
|
| 1696 |
+
},
|
| 1697 |
+
"stylus": {
|
| 1698 |
+
"optional": true
|
| 1699 |
+
},
|
| 1700 |
+
"sugarss": {
|
| 1701 |
+
"optional": true
|
| 1702 |
+
},
|
| 1703 |
+
"terser": {
|
| 1704 |
+
"optional": true
|
| 1705 |
+
}
|
| 1706 |
+
}
|
| 1707 |
+
},
|
| 1708 |
+
"node_modules/vite-node": {
|
| 1709 |
+
"version": "1.6.1",
|
| 1710 |
+
"resolved": "https://registry.npmjs.org/vite-node/-/vite-node-1.6.1.tgz",
|
| 1711 |
+
"integrity": "sha512-YAXkfvGtuTzwWbDSACdJSg4A4DZiAqckWe90Zapc/sEX3XvHcw1NdurM/6od8J207tSDqNbSsgdCacBgvJKFuA==",
|
| 1712 |
+
"dev": true,
|
| 1713 |
+
"license": "MIT",
|
| 1714 |
+
"dependencies": {
|
| 1715 |
+
"cac": "^6.7.14",
|
| 1716 |
+
"debug": "^4.3.4",
|
| 1717 |
+
"pathe": "^1.1.1",
|
| 1718 |
+
"picocolors": "^1.0.0",
|
| 1719 |
+
"vite": "^5.0.0"
|
| 1720 |
+
},
|
| 1721 |
+
"bin": {
|
| 1722 |
+
"vite-node": "vite-node.mjs"
|
| 1723 |
+
},
|
| 1724 |
+
"engines": {
|
| 1725 |
+
"node": "^18.0.0 || >=20.0.0"
|
| 1726 |
+
},
|
| 1727 |
+
"funding": {
|
| 1728 |
+
"url": "https://opencollective.com/vitest"
|
| 1729 |
+
}
|
| 1730 |
+
},
|
| 1731 |
+
"node_modules/vitest": {
|
| 1732 |
+
"version": "1.6.1",
|
| 1733 |
+
"resolved": "https://registry.npmjs.org/vitest/-/vitest-1.6.1.tgz",
|
| 1734 |
+
"integrity": "sha512-Ljb1cnSJSivGN0LqXd/zmDbWEM0RNNg2t1QW/XUhYl/qPqyu7CsqeWtqQXHVaJsecLPuDoak2oJcZN2QoRIOag==",
|
| 1735 |
+
"dev": true,
|
| 1736 |
+
"license": "MIT",
|
| 1737 |
+
"dependencies": {
|
| 1738 |
+
"@vitest/expect": "1.6.1",
|
| 1739 |
+
"@vitest/runner": "1.6.1",
|
| 1740 |
+
"@vitest/snapshot": "1.6.1",
|
| 1741 |
+
"@vitest/spy": "1.6.1",
|
| 1742 |
+
"@vitest/utils": "1.6.1",
|
| 1743 |
+
"acorn-walk": "^8.3.2",
|
| 1744 |
+
"chai": "^4.3.10",
|
| 1745 |
+
"debug": "^4.3.4",
|
| 1746 |
+
"execa": "^8.0.1",
|
| 1747 |
+
"local-pkg": "^0.5.0",
|
| 1748 |
+
"magic-string": "^0.30.5",
|
| 1749 |
+
"pathe": "^1.1.1",
|
| 1750 |
+
"picocolors": "^1.0.0",
|
| 1751 |
+
"std-env": "^3.5.0",
|
| 1752 |
+
"strip-literal": "^2.0.0",
|
| 1753 |
+
"tinybench": "^2.5.1",
|
| 1754 |
+
"tinypool": "^0.8.3",
|
| 1755 |
+
"vite": "^5.0.0",
|
| 1756 |
+
"vite-node": "1.6.1",
|
| 1757 |
+
"why-is-node-running": "^2.2.2"
|
| 1758 |
+
},
|
| 1759 |
+
"bin": {
|
| 1760 |
+
"vitest": "vitest.mjs"
|
| 1761 |
+
},
|
| 1762 |
+
"engines": {
|
| 1763 |
+
"node": "^18.0.0 || >=20.0.0"
|
| 1764 |
+
},
|
| 1765 |
+
"funding": {
|
| 1766 |
+
"url": "https://opencollective.com/vitest"
|
| 1767 |
+
},
|
| 1768 |
+
"peerDependencies": {
|
| 1769 |
+
"@edge-runtime/vm": "*",
|
| 1770 |
+
"@types/node": "^18.0.0 || >=20.0.0",
|
| 1771 |
+
"@vitest/browser": "1.6.1",
|
| 1772 |
+
"@vitest/ui": "1.6.1",
|
| 1773 |
+
"happy-dom": "*",
|
| 1774 |
+
"jsdom": "*"
|
| 1775 |
+
},
|
| 1776 |
+
"peerDependenciesMeta": {
|
| 1777 |
+
"@edge-runtime/vm": {
|
| 1778 |
+
"optional": true
|
| 1779 |
+
},
|
| 1780 |
+
"@types/node": {
|
| 1781 |
+
"optional": true
|
| 1782 |
+
},
|
| 1783 |
+
"@vitest/browser": {
|
| 1784 |
+
"optional": true
|
| 1785 |
+
},
|
| 1786 |
+
"@vitest/ui": {
|
| 1787 |
+
"optional": true
|
| 1788 |
+
},
|
| 1789 |
+
"happy-dom": {
|
| 1790 |
+
"optional": true
|
| 1791 |
+
},
|
| 1792 |
+
"jsdom": {
|
| 1793 |
+
"optional": true
|
| 1794 |
+
}
|
| 1795 |
+
}
|
| 1796 |
+
},
|
| 1797 |
+
"node_modules/which": {
|
| 1798 |
+
"version": "2.0.2",
|
| 1799 |
+
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
|
| 1800 |
+
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
|
| 1801 |
+
"dev": true,
|
| 1802 |
+
"license": "ISC",
|
| 1803 |
+
"dependencies": {
|
| 1804 |
+
"isexe": "^2.0.0"
|
| 1805 |
+
},
|
| 1806 |
+
"bin": {
|
| 1807 |
+
"node-which": "bin/node-which"
|
| 1808 |
+
},
|
| 1809 |
+
"engines": {
|
| 1810 |
+
"node": ">= 8"
|
| 1811 |
+
}
|
| 1812 |
+
},
|
| 1813 |
+
"node_modules/why-is-node-running": {
|
| 1814 |
+
"version": "2.3.0",
|
| 1815 |
+
"resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz",
|
| 1816 |
+
"integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==",
|
| 1817 |
+
"dev": true,
|
| 1818 |
+
"license": "MIT",
|
| 1819 |
+
"dependencies": {
|
| 1820 |
+
"siginfo": "^2.0.0",
|
| 1821 |
+
"stackback": "0.0.2"
|
| 1822 |
+
},
|
| 1823 |
+
"bin": {
|
| 1824 |
+
"why-is-node-running": "cli.js"
|
| 1825 |
+
},
|
| 1826 |
+
"engines": {
|
| 1827 |
+
"node": ">=8"
|
| 1828 |
+
}
|
| 1829 |
+
},
|
| 1830 |
+
"node_modules/yocto-queue": {
|
| 1831 |
+
"version": "1.2.2",
|
| 1832 |
+
"resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.2.2.tgz",
|
| 1833 |
+
"integrity": "sha512-4LCcse/U2MHZ63HAJVE+v71o7yOdIe4cZ70Wpf8D/IyjDKYQLV5GD46B+hSTjJsvV5PztjvHoU580EftxjDZFQ==",
|
| 1834 |
+
"dev": true,
|
| 1835 |
+
"license": "MIT",
|
| 1836 |
+
"engines": {
|
| 1837 |
+
"node": ">=12.20"
|
| 1838 |
+
},
|
| 1839 |
+
"funding": {
|
| 1840 |
+
"url": "https://github.com/sponsors/sindresorhus"
|
| 1841 |
+
}
|
| 1842 |
+
}
|
| 1843 |
+
}
|
| 1844 |
+
}
|
package.json
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "stack-2.9",
|
| 3 |
+
"version": "1.0.0",
|
| 4 |
+
"description": "Stack 2.9 - Voice-enabled AI coding assistant",
|
| 5 |
+
"type": "module",
|
| 6 |
+
"scripts": {
|
| 7 |
+
"build": "tsc",
|
| 8 |
+
"dev": "tsc --watch",
|
| 9 |
+
"test": "vitest"
|
| 10 |
+
},
|
| 11 |
+
"devDependencies": {
|
| 12 |
+
"typescript": "^5.3.0",
|
| 13 |
+
"@types/node": "^20.10.0",
|
| 14 |
+
"vitest": "^1.2.0"
|
| 15 |
+
}
|
| 16 |
+
}
|
convert_gguf.py → scripts/convert_gguf.py
RENAMED
|
File without changes
|
scripts/extract_patterns_from_git.py
ADDED
|
@@ -0,0 +1,457 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Extract patterns from Git commit histories for Stack 2.9 training.
|
| 4 |
+
|
| 5 |
+
This script analyzes git repositories to discover successful coding patterns,
|
| 6 |
+
common error fixes, tool usage workflows, and team collaboration patterns.
|
| 7 |
+
The extracted patterns can be used to enhance the Pattern Memory system.
|
| 8 |
+
|
| 9 |
+
Usage:
|
| 10 |
+
python extract_patterns_from_git.py --repo /path/to/repo --output training-data/git_patterns.jsonl
|
| 11 |
+
python extract_patterns_from_git.py --repo . --output ./patterns.jsonl --min-commits 10
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import os
|
| 15 |
+
import json
|
| 16 |
+
import argparse
|
| 17 |
+
import subprocess
|
| 18 |
+
from pathlib import Path
|
| 19 |
+
from typing import Dict, List, Any, Optional, Set, Tuple
|
| 20 |
+
from collections import defaultdict, Counter
|
| 21 |
+
import re
|
| 22 |
+
from datetime import datetime
|
| 23 |
+
import hashlib
|
| 24 |
+
|
| 25 |
+
class GitPatternExtractor:
|
| 26 |
+
"""Extract training patterns from git commit histories."""
|
| 27 |
+
|
| 28 |
+
def __init__(self, repo_path: str, min_commits: int = 5):
|
| 29 |
+
self.repo_path = Path(repo_path)
|
| 30 |
+
self.min_commits = min_commits
|
| 31 |
+
self.patterns = []
|
| 32 |
+
self.stats = defaultdict(int)
|
| 33 |
+
|
| 34 |
+
def run_git_command(self, cmd: List[str]) -> str:
|
| 35 |
+
"""Run a git command and return output."""
|
| 36 |
+
try:
|
| 37 |
+
result = subprocess.run(
|
| 38 |
+
["git"] + cmd,
|
| 39 |
+
cwd=self.repo_path,
|
| 40 |
+
capture_output=True,
|
| 41 |
+
text=True,
|
| 42 |
+
timeout=30
|
| 43 |
+
)
|
| 44 |
+
return result.stdout.strip()
|
| 45 |
+
except subprocess.CalledProcessError as e:
|
| 46 |
+
print(f"Git command failed: {e}")
|
| 47 |
+
return ""
|
| 48 |
+
except subprocess.TimeoutExpired:
|
| 49 |
+
print(f"Git command timed out: {cmd}")
|
| 50 |
+
return ""
|
| 51 |
+
|
| 52 |
+
def get_branches(self) -> List[str]:
|
| 53 |
+
"""Get all branches."""
|
| 54 |
+
output = self.run_git_command(["branch", "-a"])
|
| 55 |
+
branches = [b.strip().replace('* ', '') for b in output.split('\n') if b.strip()]
|
| 56 |
+
return branches
|
| 57 |
+
|
| 58 |
+
def get_commit_history(self, branch: str = "HEAD", limit: Optional[int] = None) -> List[Dict[str, Any]]:
|
| 59 |
+
"""Get detailed commit history with stats."""
|
| 60 |
+
# Use pretty format to get: hash, author, date, subject, body
|
| 61 |
+
fmt = "--pretty=format:%H|%an|%ad|%s|%b"
|
| 62 |
+
cmd = ["log", branch, fmt, "--date=iso"]
|
| 63 |
+
if limit:
|
| 64 |
+
cmd.append(f"-{limit}")
|
| 65 |
+
|
| 66 |
+
output = self.run_git_command(cmd)
|
| 67 |
+
commits = []
|
| 68 |
+
|
| 69 |
+
for line in output.split('\n'):
|
| 70 |
+
if not line.strip():
|
| 71 |
+
continue
|
| 72 |
+
parts = line.split('|', 4)
|
| 73 |
+
if len(parts) == 5:
|
| 74 |
+
commit_hash, author, date, subject, body = parts
|
| 75 |
+
commits.append({
|
| 76 |
+
"hash": commit_hash,
|
| 77 |
+
"author": author,
|
| 78 |
+
"date": date,
|
| 79 |
+
"subject": subject,
|
| 80 |
+
"body": body,
|
| 81 |
+
"branch": branch
|
| 82 |
+
})
|
| 83 |
+
|
| 84 |
+
return commits
|
| 85 |
+
|
| 86 |
+
def get_commit_stats(self, commit_hash: str) -> Dict[str, Any]:
|
| 87 |
+
"""Get statistics for a commit: files changed, insertions, deletions."""
|
| 88 |
+
output = self.run_git_command(["show", "--stat", "--oneline", commit_hash])
|
| 89 |
+
|
| 90 |
+
stats = {
|
| 91 |
+
"files_changed": 0,
|
| 92 |
+
"insertions": 0,
|
| 93 |
+
"deletions": 0,
|
| 94 |
+
"file_types": Counter()
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
# Parse the --stat output
|
| 98 |
+
for line in output.split('\n'):
|
| 99 |
+
# Count file changes
|
| 100 |
+
if '|' in line and ('+' in line or '-' in line):
|
| 101 |
+
parts = line.split('|')
|
| 102 |
+
if len(parts) >= 2:
|
| 103 |
+
filename = parts[0].strip()
|
| 104 |
+
change_stats = parts[1].strip()
|
| 105 |
+
|
| 106 |
+
stats["files_changed"] += 1
|
| 107 |
+
|
| 108 |
+
# Extract file extension
|
| 109 |
+
if '.' in filename:
|
| 110 |
+
ext = filename.split('.')[-1].lower()
|
| 111 |
+
stats["file_types"][ext] += 1
|
| 112 |
+
|
| 113 |
+
# Count insertions/deletions
|
| 114 |
+
if '+' in change_stats:
|
| 115 |
+
try:
|
| 116 |
+
ins = int(change_stats.split('+')[0].strip().split()[0])
|
| 117 |
+
stats["insertions"] += ins
|
| 118 |
+
except:
|
| 119 |
+
pass
|
| 120 |
+
if '-' in change_stats:
|
| 121 |
+
try:
|
| 122 |
+
dels = change_stats.split('-')[0].strip().split()[-1]
|
| 123 |
+
stats["deletions"] += int(dels)
|
| 124 |
+
except:
|
| 125 |
+
pass
|
| 126 |
+
|
| 127 |
+
return stats
|
| 128 |
+
|
| 129 |
+
def get_commit_diff(self, commit_hash: str) -> str:
|
| 130 |
+
"""Get the full diff for a commit."""
|
| 131 |
+
return self.run_git_command(["show", commit_hash])
|
| 132 |
+
|
| 133 |
+
def classify_commit(self, subject: str, body: str, files_changed: List[str]) -> str:
|
| 134 |
+
"""Classify the type of commit."""
|
| 135 |
+
subject_lower = subject.lower()
|
| 136 |
+
body_lower = body.lower()
|
| 137 |
+
text = subject_lower + " " + body_lower
|
| 138 |
+
|
| 139 |
+
# Keywords for classification
|
| 140 |
+
patterns = {
|
| 141 |
+
"bug_fix": ["fix", "bug", "issue", "error", "crash", "regression", "typo"],
|
| 142 |
+
"feature": ["add", "implement", "create", "new", "support", "feature"],
|
| 143 |
+
"refactor": ["refactor", "cleanup", "simplify", "reorganize", "rename"],
|
| 144 |
+
"documentation": ["doc", "readme", "comment", "documentation"],
|
| 145 |
+
"test": ["test", "spec", "fixture", "mock"],
|
| 146 |
+
"security": ["security", "vulnerability", "exploit", "cve", "auth"],
|
| 147 |
+
"performance": ["perf", "performance", "optimize", " faster", "speed"],
|
| 148 |
+
"revert": ["revert"],
|
| 149 |
+
"merge": ["merge"],
|
| 150 |
+
"chore": ["chore", "bump", "update"]
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
# Check for merge commits
|
| 154 |
+
if len(files_changed) == 0 and "merge" in subject_lower:
|
| 155 |
+
return "merge"
|
| 156 |
+
|
| 157 |
+
# Score each category
|
| 158 |
+
scores = defaultdict(int)
|
| 159 |
+
for category, keywords in patterns.items():
|
| 160 |
+
for keyword in keywords:
|
| 161 |
+
if keyword in text:
|
| 162 |
+
scores[category] += 1
|
| 163 |
+
|
| 164 |
+
# Get the highest scoring category
|
| 165 |
+
if scores:
|
| 166 |
+
best = max(scores, key=scores.get)
|
| 167 |
+
if scores[best] > 0:
|
| 168 |
+
return best
|
| 169 |
+
|
| 170 |
+
return "other"
|
| 171 |
+
|
| 172 |
+
def extract_code_snippets(self, diff: str, max_snippets: int = 3) -> List[Dict[str, Any]]:
|
| 173 |
+
"""Extract code changes from diff."""
|
| 174 |
+
snippets = []
|
| 175 |
+
current_file = None
|
| 176 |
+
current_hunk = []
|
| 177 |
+
in_hunk = False
|
| 178 |
+
|
| 179 |
+
for line in diff.split('\n'):
|
| 180 |
+
# File header
|
| 181 |
+
if line.startswith('+++ b/') or line.startswith('--- a/'):
|
| 182 |
+
if 'dev/null' not in line and 'index ' not in line:
|
| 183 |
+
current_file = line.replace('--- a/', '').replace('+++ b/', '').strip()
|
| 184 |
+
continue
|
| 185 |
+
|
| 186 |
+
# Hunk header
|
| 187 |
+
if line.startswith('@@'):
|
| 188 |
+
if current_file and current_hunk:
|
| 189 |
+
snippets.append({
|
| 190 |
+
"file": current_file,
|
| 191 |
+
"hunk": '\n'.join(current_hunk)
|
| 192 |
+
})
|
| 193 |
+
current_hunk = []
|
| 194 |
+
in_hunk = True
|
| 195 |
+
continue
|
| 196 |
+
|
| 197 |
+
# Added/removed lines
|
| 198 |
+
if in_hunk and (line.startswith('+') or line.startswith('-')):
|
| 199 |
+
current_hunk.append(line)
|
| 200 |
+
|
| 201 |
+
# Don't forget last hunk
|
| 202 |
+
if current_file and current_hunk and len(snippets) < max_snippets:
|
| 203 |
+
snippets.append({
|
| 204 |
+
"file": current_file,
|
| 205 |
+
"hunk": '\n'.join(current_hunk)
|
| 206 |
+
})
|
| 207 |
+
|
| 208 |
+
return snippets[:max_snippets]
|
| 209 |
+
|
| 210 |
+
def analyze_tool_patterns(self, diff: str, commit_message: str) -> Optional[Dict[str, Any]]:
|
| 211 |
+
"""Detect if this commit involves tool usage patterns (e.g., CLI commands, scripts)."""
|
| 212 |
+
# Look for script/command changes
|
| 213 |
+
tool_indicators = {
|
| 214 |
+
"bash": [".sh", "#!/bin/bash", "#!/usr/bin/env bash"],
|
| 215 |
+
"python": [".py", "#!/usr/bin/env python", "import ", "from "],
|
| 216 |
+
"docker": ["Dockerfile", "docker-compose", "docker build"],
|
| 217 |
+
"git": ["git commit", "git push", "git pull", "git branch"],
|
| 218 |
+
"curl": ["curl ", "wget "],
|
| 219 |
+
"npm": ["npm ", "package.json"],
|
| 220 |
+
"pip": ["pip ", "requirements.txt"],
|
| 221 |
+
}
|
| 222 |
+
|
| 223 |
+
detected_tools = []
|
| 224 |
+
for tool, patterns in tool_indicators.items():
|
| 225 |
+
for pattern in patterns:
|
| 226 |
+
if pattern.lower() in diff.lower() or pattern.lower() in commit_message.lower():
|
| 227 |
+
detected_tools.append(tool)
|
| 228 |
+
break
|
| 229 |
+
|
| 230 |
+
if detected_tools:
|
| 231 |
+
return {
|
| 232 |
+
"tools": list(set(detected_tools)),
|
| 233 |
+
"is_automation": True
|
| 234 |
+
}
|
| 235 |
+
return None
|
| 236 |
+
|
| 237 |
+
def extract_pattern_from_commit(self, commit: Dict[str, Any]) -> Optional[Dict[str, Any]]:
|
| 238 |
+
"""Extract a pattern from a single commit."""
|
| 239 |
+
stats = self.get_commit_stats(commit["hash"])
|
| 240 |
+
|
| 241 |
+
# Skip if too few files changed (likely merge commit or trivial)
|
| 242 |
+
if stats["files_changed"] == 0:
|
| 243 |
+
return None
|
| 244 |
+
|
| 245 |
+
# Get the diff
|
| 246 |
+
diff = self.get_commit_diff(commit["hash"])
|
| 247 |
+
if not diff:
|
| 248 |
+
return None
|
| 249 |
+
|
| 250 |
+
# Classify the commit
|
| 251 |
+
files_changed = []
|
| 252 |
+
for line in diff.split('\n'):
|
| 253 |
+
if line.startswith('+++ b/') or line.startswith('--- a/'):
|
| 254 |
+
filename = line.replace('--- a/', '').replace('+++ b/', '').strip()
|
| 255 |
+
if 'dev/null' not in filename and 'index ' not in filename:
|
| 256 |
+
files_changed.append(filename)
|
| 257 |
+
|
| 258 |
+
commit_type = self.classify_commit(commit["subject"], commit["body"], files_changed)
|
| 259 |
+
|
| 260 |
+
# Extract code snippets
|
| 261 |
+
code_snippets = self.extract_code_snippets(diff)
|
| 262 |
+
|
| 263 |
+
# Detect tool patterns
|
| 264 |
+
tool_pattern = self.analyze_tool_patterns(diff, commit["subject"])
|
| 265 |
+
|
| 266 |
+
# Build pattern entry
|
| 267 |
+
pattern = {
|
| 268 |
+
"type": "git_commit_pattern",
|
| 269 |
+
"commit_hash": commit["hash"][:8],
|
| 270 |
+
"commit_type": commit_type,
|
| 271 |
+
"author": commit["author"],
|
| 272 |
+
"date": commit["date"],
|
| 273 |
+
"subject": commit["subject"],
|
| 274 |
+
"stats": {
|
| 275 |
+
"files_changed": stats["files_changed"],
|
| 276 |
+
"insertions": stats["insertions"],
|
| 277 |
+
"deletions": stats["deletions"],
|
| 278 |
+
"file_types": dict(stats["file_types"])
|
| 279 |
+
},
|
| 280 |
+
"code_snippets": code_snippets,
|
| 281 |
+
"tool_detection": tool_pattern,
|
| 282 |
+
"pattern_id": hashlib.md5(f"{commit['hash']}{commit['subject']}".encode()).hexdigest()[:12]
|
| 283 |
+
}
|
| 284 |
+
|
| 285 |
+
# Add success indicators (conventional commits, passing tests, etc.)
|
| 286 |
+
pattern["is_successful"] = self._is_successful_commit(commit, diff)
|
| 287 |
+
|
| 288 |
+
return pattern
|
| 289 |
+
|
| 290 |
+
def _is_successful_commit(self, commit: Dict[str, Any], diff: str) -> bool:
|
| 291 |
+
"""Heuristics to determine if a commit represents a successful change."""
|
| 292 |
+
# Check for revert commits
|
| 293 |
+
if commit["subject"].lower().startswith("revert"):
|
| 294 |
+
return False
|
| 295 |
+
|
| 296 |
+
# Check for "fix" keywords followed by non-breaking changes
|
| 297 |
+
subject_lower = commit["subject"].lower()
|
| 298 |
+
if any(kw in subject_lower for kw in ["fix", "resolve", "solve"]):
|
| 299 |
+
return True
|
| 300 |
+
|
| 301 |
+
# Check if it's a refactor that simplifies code (more deletions than additions)
|
| 302 |
+
if "refactor" in subject_lower:
|
| 303 |
+
# We'd need to parse the diff more precisely, but roughly:
|
| 304 |
+
# if deletions > insertions, likely simplification
|
| 305 |
+
pass
|
| 306 |
+
|
| 307 |
+
# Assume most commits are successful unless they're clearly broken
|
| 308 |
+
# (e.g., "WIP", "TODO", "broken", "temp")
|
| 309 |
+
bad_words = ["wip", "todo", "broken", "temp", "hack", "quick fix"]
|
| 310 |
+
if any(word in subject_lower for word in bad_words):
|
| 311 |
+
return False
|
| 312 |
+
|
| 313 |
+
return True
|
| 314 |
+
|
| 315 |
+
def extract_all_patterns(self) -> List[Dict[str, Any]]:
|
| 316 |
+
"""Main extraction routine."""
|
| 317 |
+
print(f"🔍 Analyzing repository: {self.repo_path}")
|
| 318 |
+
|
| 319 |
+
# Check if it's a git repo
|
| 320 |
+
if not (self.repo_path / ".git").exists():
|
| 321 |
+
raise ValueError(f"Not a git repository: {self.repo_path}")
|
| 322 |
+
|
| 323 |
+
branches = self.get_branches()
|
| 324 |
+
print(f" Found {len(branches)} branches")
|
| 325 |
+
|
| 326 |
+
# Get commits from main/master branch first, then others
|
| 327 |
+
main_branches = [b for b in branches if any(main in b for main in ['main', 'master', 'trunk'])]
|
| 328 |
+
if not main_branches:
|
| 329 |
+
main_branches = branches[:1] # Just take first branch if no main
|
| 330 |
+
|
| 331 |
+
all_commits = []
|
| 332 |
+
for branch in main_branches[:3]: # Limit to 3 branches to avoid overload
|
| 333 |
+
print(f" Processing branch: {branch}")
|
| 334 |
+
commits = self.get_commit_history(branch, limit=100) # Limit per branch
|
| 335 |
+
print(f" Found {len(commits)} commits")
|
| 336 |
+
all_commits.extend(commits)
|
| 337 |
+
|
| 338 |
+
# Deduplicate by hash
|
| 339 |
+
seen_hashes = set()
|
| 340 |
+
unique_commits = []
|
| 341 |
+
for commit in all_commits:
|
| 342 |
+
if commit["hash"] not in seen_hashes:
|
| 343 |
+
seen_hashes.add(commit["hash"])
|
| 344 |
+
unique_commits.append(commit)
|
| 345 |
+
|
| 346 |
+
print(f" Total unique commits: {len(unique_commits)}")
|
| 347 |
+
|
| 348 |
+
# Extract patterns
|
| 349 |
+
patterns = []
|
| 350 |
+
for commit in unique_commits:
|
| 351 |
+
try:
|
| 352 |
+
pattern = self.extract_pattern_from_commit(commit)
|
| 353 |
+
if pattern:
|
| 354 |
+
patterns.append(pattern)
|
| 355 |
+
self.stats[pattern["commit_type"]] += 1
|
| 356 |
+
except Exception as e:
|
| 357 |
+
print(f" Warning: Failed to extract pattern from commit {commit['hash'][:8]}: {e}")
|
| 358 |
+
continue
|
| 359 |
+
|
| 360 |
+
print(f"\n✨ Extracted {len(patterns)} patterns")
|
| 361 |
+
print(" By type:")
|
| 362 |
+
for ptype, count in sorted(self.stats.items(), key=lambda x: -x[1]):
|
| 363 |
+
print(f" {ptype}: {count}")
|
| 364 |
+
|
| 365 |
+
self.patterns = patterns
|
| 366 |
+
return patterns
|
| 367 |
+
|
| 368 |
+
def save_patterns(self, output_path: Path):
|
| 369 |
+
"""Save patterns to JSONL file."""
|
| 370 |
+
output_path.parent.mkdir(parents=True, exist_ok=True)
|
| 371 |
+
|
| 372 |
+
with open(output_path, 'w') as f:
|
| 373 |
+
for pattern in self.patterns:
|
| 374 |
+
f.write(json.dumps(pattern) + '\n')
|
| 375 |
+
|
| 376 |
+
print(f"\n💾 Saved patterns to: {output_path}")
|
| 377 |
+
|
| 378 |
+
# Also save a summary
|
| 379 |
+
summary_path = output_path.with_name(output_path.stem + '_summary.json')
|
| 380 |
+
summary = {
|
| 381 |
+
"total_patterns": len(self.patterns),
|
| 382 |
+
"by_type": dict(self.stats),
|
| 383 |
+
"extraction_date": datetime.now().isoformat(),
|
| 384 |
+
"repo": str(self.repo_path)
|
| 385 |
+
}
|
| 386 |
+
with open(summary_path, 'w') as f:
|
| 387 |
+
json.dump(summary, f, indent=2)
|
| 388 |
+
print(f"📊 Saved summary to: {summary_path}")
|
| 389 |
+
|
| 390 |
+
def main():
|
| 391 |
+
parser = argparse.ArgumentParser(
|
| 392 |
+
description="Extract patterns from Git commit histories for Stack 2.9 training."
|
| 393 |
+
)
|
| 394 |
+
parser.add_argument(
|
| 395 |
+
"--repo",
|
| 396 |
+
type=str,
|
| 397 |
+
default=".",
|
| 398 |
+
help="Path to git repository (default: current directory)"
|
| 399 |
+
)
|
| 400 |
+
parser.add_argument(
|
| 401 |
+
"--output",
|
| 402 |
+
type=str,
|
| 403 |
+
default="training-data/git_patterns.jsonl",
|
| 404 |
+
help="Output file path (JSONL format)"
|
| 405 |
+
)
|
| 406 |
+
parser.add_argument(
|
| 407 |
+
"--min-commits",
|
| 408 |
+
type=int,
|
| 409 |
+
default=5,
|
| 410 |
+
help="Minimum commits per branch to process (default: 5)"
|
| 411 |
+
)
|
| 412 |
+
parser.add_argument(
|
| 413 |
+
"--limit",
|
| 414 |
+
type=int,
|
| 415 |
+
help="Limit number of commits to process (for testing)"
|
| 416 |
+
)
|
| 417 |
+
|
| 418 |
+
args = parser.parse_args()
|
| 419 |
+
|
| 420 |
+
try:
|
| 421 |
+
extractor = GitPatternExtractor(args.repo, min_commits=args.min_commits)
|
| 422 |
+
|
| 423 |
+
if args.limit:
|
| 424 |
+
# Override commit limit by modifying the method
|
| 425 |
+
original_get_commit_history = extractor.get_commit_history
|
| 426 |
+
def limited_get_commit_history(branch, limit=None):
|
| 427 |
+
return original_get_commit_history(branch, limit=args.limit)
|
| 428 |
+
extractor.get_commit_history = limited_get_commit_history
|
| 429 |
+
|
| 430 |
+
patterns = extractor.extract_all_patterns()
|
| 431 |
+
|
| 432 |
+
if patterns:
|
| 433 |
+
extractor.save_patterns(Path(args.output))
|
| 434 |
+
|
| 435 |
+
# Show sample pattern
|
| 436 |
+
print("\n📋 Sample pattern:")
|
| 437 |
+
sample = patterns[0]
|
| 438 |
+
print(f" Type: {sample['commit_type']}")
|
| 439 |
+
print(f" Subject: {sample['subject']}")
|
| 440 |
+
print(f" Files: {sample['stats']['files_changed']} changed")
|
| 441 |
+
print(f" Insertions: {sample['stats']['insertions']}, Deletions: {sample['stats']['deletions']}")
|
| 442 |
+
if sample['tool_detection']:
|
| 443 |
+
print(f" Tools: {', '.join(sample['tool_detection']['tools'])}")
|
| 444 |
+
else:
|
| 445 |
+
print("\n⚠️ No patterns extracted. Try:")
|
| 446 |
+
print(" - Checking that the repository has commit history")
|
| 447 |
+
print(" - Increasing --limit or --min-commits")
|
| 448 |
+
print(" - Using a repository with more substantial commits")
|
| 449 |
+
|
| 450 |
+
except Exception as e:
|
| 451 |
+
print(f"❌ Error: {e}")
|
| 452 |
+
return 1
|
| 453 |
+
|
| 454 |
+
return 0
|
| 455 |
+
|
| 456 |
+
if __name__ == "__main__":
|
| 457 |
+
exit(main())
|
scripts/extract_rtmp_tools.ts
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Extract tool schemas from RTMP for training data
|
| 2 |
+
//
|
| 3 |
+
// This script extracts tool definitions from the RTMP codebase
|
| 4 |
+
// and adds them to stack-2.9's training data catalog.
|
| 5 |
+
|
| 6 |
+
import { readdir, readFile, writeFile } from 'fs/promises'
|
| 7 |
+
import { join, basename } from 'path'
|
| 8 |
+
|
| 9 |
+
const RTMP_TOOLS_DIR = '/Users/walidsobhi/.openclaw/workspace/RTMP/tools'
|
| 10 |
+
const STACK_CATALOG = '/Users/walidsobhi/.openclaw/workspace/stack-2.9/training-data/tools/catalog.json'
|
| 11 |
+
|
| 12 |
+
interface ToolSchema {
|
| 13 |
+
tool: string
|
| 14 |
+
description: string
|
| 15 |
+
hasPrompt: boolean
|
| 16 |
+
hasImplementation: boolean
|
| 17 |
+
inputSchema: Record<string, unknown>
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
async function extractToolSchemas(): Promise<ToolSchema[]> {
|
| 21 |
+
const tools: ToolSchema[] = []
|
| 22 |
+
const toolDirs = await readdir(RTMP_TOOLS_DIR)
|
| 23 |
+
|
| 24 |
+
for (const toolDir of toolDirs) {
|
| 25 |
+
const toolPath = join(RTMP_TOOLS_DIR, toolDir)
|
| 26 |
+
const stat = await readdir(toolPath).then(() => true).catch(() => false)
|
| 27 |
+
|
| 28 |
+
if (!stat) continue
|
| 29 |
+
|
| 30 |
+
// Try to extract tool name and description from tool files
|
| 31 |
+
let description = ''
|
| 32 |
+
let hasPrompt = false
|
| 33 |
+
let hasImplementation = false
|
| 34 |
+
|
| 35 |
+
try {
|
| 36 |
+
// Check for prompt.ts
|
| 37 |
+
const promptPath = join(toolPath, 'prompt.ts')
|
| 38 |
+
const promptContent = await readFile(promptPath, 'utf-8')
|
| 39 |
+
hasPrompt = true
|
| 40 |
+
|
| 41 |
+
// Extract first meaningful comment as description
|
| 42 |
+
const comments = promptContent.match(/\/\*\*[\s\S]*?\*\//g)
|
| 43 |
+
if (comments && comments.length > 0) {
|
| 44 |
+
const comment = comments[0]
|
| 45 |
+
description = comment
|
| 46 |
+
.replace(/\/\*\*|\*\//g, '')
|
| 47 |
+
.replace(/^\s*\*\s?/gm, '')
|
| 48 |
+
.trim()
|
| 49 |
+
.slice(0, 200)
|
| 50 |
+
}
|
| 51 |
+
} catch {
|
| 52 |
+
// No prompt.ts
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
try {
|
| 56 |
+
// Check for implementation files
|
| 57 |
+
const toolFiles = await readdir(toolPath)
|
| 58 |
+
hasImplementation = toolFiles.some(f =>
|
| 59 |
+
f.endsWith('.ts') || f.endsWith('.tsx')
|
| 60 |
+
)
|
| 61 |
+
} catch {
|
| 62 |
+
// Ignore
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
// Format tool name (remove Tool suffix for cleaner names)
|
| 66 |
+
const toolName = toolDir.replace(/Tool$/, '')
|
| 67 |
+
|
| 68 |
+
tools.push({
|
| 69 |
+
tool: toolDir,
|
| 70 |
+
description: description || `${toolName} tool`,
|
| 71 |
+
hasPrompt,
|
| 72 |
+
hasImplementation,
|
| 73 |
+
inputSchema: {}
|
| 74 |
+
})
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
return tools
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
async function main() {
|
| 81 |
+
console.log('Extracting tool schemas from RTMP...')
|
| 82 |
+
|
| 83 |
+
const tools = await extractToolSchemas()
|
| 84 |
+
console.log(`Found ${tools.length} tools`)
|
| 85 |
+
|
| 86 |
+
// Read existing catalog
|
| 87 |
+
let existingTools: ToolSchema[] = []
|
| 88 |
+
try {
|
| 89 |
+
const existingContent = await readFile(STACK_CATALOG, 'utf-8')
|
| 90 |
+
existingTools = JSON.parse(existingContent)
|
| 91 |
+
} catch {
|
| 92 |
+
console.log('No existing catalog found')
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
// Merge with existing (avoid duplicates)
|
| 96 |
+
const existingNames = new Set(existingTools.map(t => t.tool))
|
| 97 |
+
const newTools = tools.filter(t => !existingNames.has(t.tool))
|
| 98 |
+
|
| 99 |
+
console.log(`Adding ${newTools.length} new tools`)
|
| 100 |
+
|
| 101 |
+
// Combine
|
| 102 |
+
const allTools = [...existingTools, ...newTools]
|
| 103 |
+
|
| 104 |
+
// Write updated catalog
|
| 105 |
+
await writeFile(STACK_CATALOG, JSON.stringify(allTools, null, 2))
|
| 106 |
+
console.log(`Updated catalog with ${allTools.length} tools`)
|
| 107 |
+
|
| 108 |
+
// Also print summary
|
| 109 |
+
console.log('\nNew tools added:')
|
| 110 |
+
for (const tool of newTools) {
|
| 111 |
+
console.log(` - ${tool.tool}`)
|
| 112 |
+
}
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
main().catch(console.error)
|
scripts/fuse_lora_adapters.py
ADDED
|
@@ -0,0 +1,413 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Fuse LoRA adapters from multiple team members into a unified model.
|
| 4 |
+
|
| 5 |
+
This script demonstrates how to merge multiple LoRA adapters trained on different
|
| 6 |
+
codebases or by different team members, enabling collective intelligence while
|
| 7 |
+
preserving individual specialization.
|
| 8 |
+
|
| 9 |
+
Algorithm: Weighted averaging with similarity-based adaptive weights.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import os
|
| 13 |
+
import json
|
| 14 |
+
import torch
|
| 15 |
+
import argparse
|
| 16 |
+
from pathlib import Path
|
| 17 |
+
from typing import Dict, List, Optional, Tuple
|
| 18 |
+
import numpy as np
|
| 19 |
+
from collections import defaultdict
|
| 20 |
+
|
| 21 |
+
def load_lora_adapter(adapter_path: str, device: str = "cpu") -> Dict[str, torch.Tensor]:
|
| 22 |
+
"""
|
| 23 |
+
Load a LoRA adapter from a safetensors or pytorch bin file.
|
| 24 |
+
|
| 25 |
+
Returns: Dict of parameter name -> tensor
|
| 26 |
+
"""
|
| 27 |
+
adapter_path = Path(adapter_path)
|
| 28 |
+
|
| 29 |
+
# Try safetensors first (faster, no pickle)
|
| 30 |
+
safetensors_path = adapter_path / "adapter_model.safetensors"
|
| 31 |
+
if safetensors_path.exists():
|
| 32 |
+
from safetensors import safe_open
|
| 33 |
+
tensors = {}
|
| 34 |
+
with safe_open(safetensors_path, framework="pt", device=device) as f:
|
| 35 |
+
for key in f.keys():
|
| 36 |
+
tensors[key] = f.get_tensor(key)
|
| 37 |
+
return tensors
|
| 38 |
+
|
| 39 |
+
# Fall back to pytorch bin
|
| 40 |
+
pytorch_path = adapter_path / "adapter_model.bin"
|
| 41 |
+
if pytorch_path.exists():
|
| 42 |
+
tensors = torch.load(pytorch_path, map_location=device, weights_only=True)
|
| 43 |
+
return tensors
|
| 44 |
+
|
| 45 |
+
raise FileNotFoundError(f"No adapter found at {adapter_path}")
|
| 46 |
+
|
| 47 |
+
def compute_adapter_metadata(adapter_path: str) -> Dict[str, Any]:
|
| 48 |
+
"""
|
| 49 |
+
Load adapter metadata (training stats, performance, etc.) if available.
|
| 50 |
+
"""
|
| 51 |
+
metadata_path = Path(adapter_path) / "adapter_metadata.json"
|
| 52 |
+
if metadata_path.exists():
|
| 53 |
+
with open(metadata_path, 'r') as f:
|
| 54 |
+
return json.load(f)
|
| 55 |
+
|
| 56 |
+
# Default metadata
|
| 57 |
+
return {
|
| 58 |
+
"training_examples": 0,
|
| 59 |
+
"validation_score": 0.0,
|
| 60 |
+
"domains": [],
|
| 61 |
+
"team_member": "unknown"
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
def compute_similarity_matrix(
|
| 65 |
+
adapters: List[Tuple[str, Dict[str, torch.Tensor]]],
|
| 66 |
+
sample_keys: Optional[List[str]] = None
|
| 67 |
+
) -> np.ndarray:
|
| 68 |
+
"""
|
| 69 |
+
Compute pairwise similarity between adapters based on weight distributions.
|
| 70 |
+
|
| 71 |
+
Uses cosine similarity of normalized weight vectors.
|
| 72 |
+
"""
|
| 73 |
+
n = len(adapters)
|
| 74 |
+
similarity = np.zeros((n, n))
|
| 75 |
+
|
| 76 |
+
# Get parameter names common to all adapters
|
| 77 |
+
if sample_keys is None:
|
| 78 |
+
common_keys = set(adapters[0][1].keys())
|
| 79 |
+
for _, tensors in adapters[1:]:
|
| 80 |
+
common_keys &= set(tensors.keys())
|
| 81 |
+
sample_keys = list(common_keys)[:100] # Sample up to 100 parameters
|
| 82 |
+
|
| 83 |
+
# Flatten sampled parameters for each adapter
|
| 84 |
+
vectors = []
|
| 85 |
+
for _, tensors in adapters:
|
| 86 |
+
vec_parts = []
|
| 87 |
+
for key in sample_keys:
|
| 88 |
+
if key in tensors:
|
| 89 |
+
# Flatten and normalize
|
| 90 |
+
t = tensors[key].float().flatten()
|
| 91 |
+
norm = torch.norm(t).item()
|
| 92 |
+
if norm > 1e-8:
|
| 93 |
+
t = t / norm
|
| 94 |
+
vec_parts.append(t.numpy())
|
| 95 |
+
else:
|
| 96 |
+
# Missing parameter, use zeros
|
| 97 |
+
shape = tensors[sample_keys[0]].shape if sample_keys[0] in tensors else (1,)
|
| 98 |
+
vec_parts.append(np.zeros(shape).flatten())
|
| 99 |
+
vectors.append(np.concatenate(vec_parts))
|
| 100 |
+
|
| 101 |
+
# Compute cosine similarity
|
| 102 |
+
for i in range(n):
|
| 103 |
+
for j in range(n):
|
| 104 |
+
if i == j:
|
| 105 |
+
similarity[i, j] = 1.0
|
| 106 |
+
else:
|
| 107 |
+
v1, v2 = vectors[i], vectors[j]
|
| 108 |
+
sim = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2) + 1e-8)
|
| 109 |
+
similarity[i, j] = sim
|
| 110 |
+
|
| 111 |
+
return similarity
|
| 112 |
+
|
| 113 |
+
def compute_adaptive_weights(
|
| 114 |
+
similarities: np.ndarray,
|
| 115 |
+
metadata: List[Dict[str, Any]],
|
| 116 |
+
base_config: Dict[str, float]
|
| 117 |
+
) -> np.ndarray:
|
| 118 |
+
"""
|
| 119 |
+
Compute fusion weights using adaptive strategy:
|
| 120 |
+
|
| 121 |
+
w_i = (performance_i * domain_overlap_i) / (sum(performance_j * domain_overlap_j) + epsilon)
|
| 122 |
+
|
| 123 |
+
With similarity-based adjustments:
|
| 124 |
+
- Higher weight adapters that are similar to each other get boosted
|
| 125 |
+
- Diverse adapters get balanced contributions
|
| 126 |
+
"""
|
| 127 |
+
n = len(metadata)
|
| 128 |
+
weights = np.zeros(n)
|
| 129 |
+
|
| 130 |
+
# Base weights from performance
|
| 131 |
+
base_weights = np.array([
|
| 132 |
+
meta.get("validation_score", 0.0) *
|
| 133 |
+
meta.get("training_examples", 1) / 1000.0 # Normalize by dataset size
|
| 134 |
+
for meta in metadata
|
| 135 |
+
])
|
| 136 |
+
|
| 137 |
+
# Domain overlap weights
|
| 138 |
+
domain_weights = np.zeros(n)
|
| 139 |
+
all_domains = defaultdict(int)
|
| 140 |
+
for i, meta in enumerate(metadata):
|
| 141 |
+
for domain in meta.get("domains", []):
|
| 142 |
+
all_domains[domain] += 1
|
| 143 |
+
|
| 144 |
+
for i, meta in enumerate(metadata):
|
| 145 |
+
overlap = 0.0
|
| 146 |
+
for domain in meta.get("domains", []):
|
| 147 |
+
# Rare domains get higher weight
|
| 148 |
+
overlap += 1.0 / all_domains[domain]
|
| 149 |
+
domain_weights[i] = overlap if overlap > 0 else 1.0
|
| 150 |
+
|
| 151 |
+
# Combine base weights with domain weights
|
| 152 |
+
raw_weights = base_weights * domain_weights
|
| 153 |
+
|
| 154 |
+
# Apply similarity-based smoothing
|
| 155 |
+
# If two adapters are very similar, distribute weight more evenly
|
| 156 |
+
similarity_threshold = base_config.get("similarity_threshold", 0.9)
|
| 157 |
+
similarity_damping = base_config.get("similarity_damping", 0.3)
|
| 158 |
+
|
| 159 |
+
for i in range(n):
|
| 160 |
+
for j in range(i+1, n):
|
| 161 |
+
if similarities[i, j] > similarity_threshold:
|
| 162 |
+
# Too similar, dampen differences
|
| 163 |
+
avg_weight = (raw_weights[i] + raw_weights[j]) / 2
|
| 164 |
+
raw_weights[i] = raw_weights[i] * (1 - similarity_damping) + avg_weight * similarity_damping
|
| 165 |
+
raw_weights[j] = raw_weights[j] * (1 - similarity_damping) + avg_weight * similarity_damping
|
| 166 |
+
|
| 167 |
+
# Normalize
|
| 168 |
+
total = np.sum(raw_weights)
|
| 169 |
+
if total > 0:
|
| 170 |
+
weights = raw_weights / total
|
| 171 |
+
else:
|
| 172 |
+
weights = np.ones(n) / n
|
| 173 |
+
|
| 174 |
+
return weights
|
| 175 |
+
|
| 176 |
+
def fuse_adapters(
|
| 177 |
+
adapter_paths: List[str],
|
| 178 |
+
output_path: str,
|
| 179 |
+
config: Optional[Dict] = None
|
| 180 |
+
) -> Tuple[Path, Dict]:
|
| 181 |
+
"""
|
| 182 |
+
Fuse multiple LoRA adapters into a single adapter.
|
| 183 |
+
|
| 184 |
+
Args:
|
| 185 |
+
adapter_paths: List of paths to adapter directories
|
| 186 |
+
output_path: Where to save the fused adapter
|
| 187 |
+
config: Fusion configuration (weights, similarity thresholds, etc.)
|
| 188 |
+
|
| 189 |
+
Returns:
|
| 190 |
+
Path to fused adapter, fusion metadata
|
| 191 |
+
"""
|
| 192 |
+
if config is None:
|
| 193 |
+
config = {
|
| 194 |
+
"fusion_method": "weighted_average",
|
| 195 |
+
"similarity_threshold": 0.9,
|
| 196 |
+
"similarity_damping": 0.3,
|
| 197 |
+
"normalize_weights": True,
|
| 198 |
+
"clip_diff": 2.0 # Clip weight differences to avoid extreme values
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
print(f"🔗 Fusing {len(adapter_paths)} adapters...")
|
| 202 |
+
|
| 203 |
+
# Load all adapters
|
| 204 |
+
adapters = []
|
| 205 |
+
metadata_list = []
|
| 206 |
+
|
| 207 |
+
for path in adapter_paths:
|
| 208 |
+
print(f" Loading: {Path(path).name}")
|
| 209 |
+
try:
|
| 210 |
+
tensors = load_lora_adapter(path)
|
| 211 |
+
meta = compute_adapter_metadata(path)
|
| 212 |
+
adapters.append((path, tensors))
|
| 213 |
+
metadata_list.append(meta)
|
| 214 |
+
except Exception as e:
|
| 215 |
+
print(f" ⚠️ Skipped {path}: {e}")
|
| 216 |
+
|
| 217 |
+
if len(adapters) < 2:
|
| 218 |
+
raise ValueError("Need at least 2 adapters to fuse")
|
| 219 |
+
|
| 220 |
+
# Get common parameter keys
|
| 221 |
+
common_keys = set(adapters[0][1].keys())
|
| 222 |
+
for _, tensors in adapters[1:]:
|
| 223 |
+
common_keys &= set(tensors.keys())
|
| 224 |
+
|
| 225 |
+
print(f" Common parameters: {len(common_keys)}")
|
| 226 |
+
|
| 227 |
+
# Compute similarities
|
| 228 |
+
print(" Computing adapter similarities...")
|
| 229 |
+
# Sample parameters for similarity computation
|
| 230 |
+
sample_keys = list(common_keys)[:min(100, len(common_keys))]
|
| 231 |
+
similarities = compute_similarity_matrix(adapters, sample_keys)
|
| 232 |
+
|
| 233 |
+
# Compute adaptive weights
|
| 234 |
+
weights = compute_adaptive_weights(similarities, metadata_list, config)
|
| 235 |
+
|
| 236 |
+
print(" Fusion weights:")
|
| 237 |
+
for i, (path, _) in enumerate(adapters):
|
| 238 |
+
member = metadata_list[i].get("team_member", f"adapter_{i}")
|
| 239 |
+
print(f" {member}: {weights[i]:.3f}")
|
| 240 |
+
|
| 241 |
+
# Fuse weights
|
| 242 |
+
print(" Fusing weights...")
|
| 243 |
+
fused_tensors = {}
|
| 244 |
+
|
| 245 |
+
for key in common_keys:
|
| 246 |
+
# Start with zero tensor
|
| 247 |
+
fused = None
|
| 248 |
+
|
| 249 |
+
for idx, (_, tensors) in enumerate(adapters):
|
| 250 |
+
weight = weights[idx]
|
| 251 |
+
tensor = tensors[key].float()
|
| 252 |
+
|
| 253 |
+
if fused is None:
|
| 254 |
+
fused = tensor * weight
|
| 255 |
+
else:
|
| 256 |
+
fused += tensor * weight
|
| 257 |
+
|
| 258 |
+
# Apply clipping if configured
|
| 259 |
+
if config["clip_diff"] > 0:
|
| 260 |
+
# Clip extreme values relative to first adapter
|
| 261 |
+
reference = adapters[0][1][key].float()
|
| 262 |
+
max_diff = torch.max(torch.abs(fused - reference)) * config["clip_diff"]
|
| 263 |
+
# This is a simple heuristic - could be improved
|
| 264 |
+
fused = torch.clamp(fused,
|
| 265 |
+
reference - max_diff,
|
| 266 |
+
reference + max_diff)
|
| 267 |
+
|
| 268 |
+
fused_tensors[key] = fused.half() # Convert back to half precision
|
| 269 |
+
|
| 270 |
+
# Save fused adapter
|
| 271 |
+
output_path = Path(output_path)
|
| 272 |
+
output_path.mkdir(parents=True, exist_ok=True)
|
| 273 |
+
|
| 274 |
+
# Save tensors
|
| 275 |
+
fused_file = output_path / "adapter_model.safetensors"
|
| 276 |
+
try:
|
| 277 |
+
from safetensors import save_file
|
| 278 |
+
save_file(fused_tensors, str(fused_file))
|
| 279 |
+
except ImportError:
|
| 280 |
+
# Fallback to pytorch
|
| 281 |
+
torch.save(fused_tensors, output_path / "adapter_model.bin")
|
| 282 |
+
|
| 283 |
+
# Save metadata
|
| 284 |
+
fusion_metadata = {
|
| 285 |
+
"fusion_date": "2025-04-03", # Would use datetime.now()
|
| 286 |
+
"source_adapters": [
|
| 287 |
+
{
|
| 288 |
+
"path": path,
|
| 289 |
+
"team_member": meta.get("team_member", "unknown"),
|
| 290 |
+
"validation_score": meta.get("validation_score", 0.0),
|
| 291 |
+
"domains": meta.get("domains", []),
|
| 292 |
+
"weight": float(weights[i])
|
| 293 |
+
}
|
| 294 |
+
for i, (path, meta) in enumerate(zip([p for p, _ in adapters], metadata_list))
|
| 295 |
+
],
|
| 296 |
+
"fusion_config": config,
|
| 297 |
+
"similarity_matrix": similarities.tolist(),
|
| 298 |
+
"total_parameters": len(common_keys)
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
with open(output_path / "fusion_metadata.json", 'w') as f:
|
| 302 |
+
json.dump(fusion_metadata, f, indent=2)
|
| 303 |
+
|
| 304 |
+
print(f"\n✅ Fused adapter saved to: {output_path}")
|
| 305 |
+
print(f" Parameters: {len(common_keys)}")
|
| 306 |
+
print(f" Used samples: {sum(m.get('training_examples', 0) for m in metadata_list)}")
|
| 307 |
+
|
| 308 |
+
return output_path, fusion_metadata
|
| 309 |
+
|
| 310 |
+
def validate_fusion(
|
| 311 |
+
fused_adapter_path: str,
|
| 312 |
+
test_cases_path: Optional[str] = None,
|
| 313 |
+
base_model: str = "Qwen/Qwen2.5-Coder-32B"
|
| 314 |
+
) -> Dict[str, float]:
|
| 315 |
+
"""
|
| 316 |
+
Validate the fused adapter against test cases.
|
| 317 |
+
|
| 318 |
+
Returns: Dictionary with validation metrics
|
| 319 |
+
"""
|
| 320 |
+
print("🔍 Validating fused adapter...")
|
| 321 |
+
|
| 322 |
+
# This would integrate with the evaluation framework
|
| 323 |
+
# For now, return mock metrics
|
| 324 |
+
metrics = {
|
| 325 |
+
"score": 0.0,
|
| 326 |
+
"test_cases": 0,
|
| 327 |
+
"passed": 0
|
| 328 |
+
}
|
| 329 |
+
|
| 330 |
+
if test_cases_path:
|
| 331 |
+
# Load and run test cases
|
| 332 |
+
test_cases_path = Path(test_cases_path)
|
| 333 |
+
if test_cases_path.exists():
|
| 334 |
+
# Would implement actual validation
|
| 335 |
+
pass
|
| 336 |
+
|
| 337 |
+
print(f" Validation complete (placeholder)")
|
| 338 |
+
return metrics
|
| 339 |
+
|
| 340 |
+
def main():
|
| 341 |
+
parser = argparse.ArgumentParser(
|
| 342 |
+
description="Fuse LoRA adapters from multiple team members."
|
| 343 |
+
)
|
| 344 |
+
parser.add_argument(
|
| 345 |
+
"--adapters",
|
| 346 |
+
nargs='+',
|
| 347 |
+
required=True,
|
| 348 |
+
help="Paths to adapter directories (each with adapter_model.safetensors)"
|
| 349 |
+
)
|
| 350 |
+
parser.add_argument(
|
| 351 |
+
"--output",
|
| 352 |
+
type=str,
|
| 353 |
+
default="fused-adapter",
|
| 354 |
+
help="Output directory for fused adapter"
|
| 355 |
+
)
|
| 356 |
+
parser.add_argument(
|
| 357 |
+
"--config",
|
| 358 |
+
type=str,
|
| 359 |
+
help="JSON config file with fusion parameters"
|
| 360 |
+
)
|
| 361 |
+
parser.add_argument(
|
| 362 |
+
"--validate",
|
| 363 |
+
action="store_true",
|
| 364 |
+
help="Run validation after fusion"
|
| 365 |
+
)
|
| 366 |
+
parser.add_argument(
|
| 367 |
+
"--base-model",
|
| 368 |
+
type=str,
|
| 369 |
+
default="Qwen/Qwen2.5-Coder-32B",
|
| 370 |
+
help="Base model identifier"
|
| 371 |
+
)
|
| 372 |
+
|
| 373 |
+
args = parser.parse_args()
|
| 374 |
+
|
| 375 |
+
# Load config if provided
|
| 376 |
+
config = None
|
| 377 |
+
if args.config:
|
| 378 |
+
with open(args.config, 'r') as f:
|
| 379 |
+
config = json.load(f)
|
| 380 |
+
|
| 381 |
+
# Fuse adapters
|
| 382 |
+
try:
|
| 383 |
+
output_path, metadata = fuse_adapters(
|
| 384 |
+
args.adapters,
|
| 385 |
+
args.output,
|
| 386 |
+
config or {}
|
| 387 |
+
)
|
| 388 |
+
|
| 389 |
+
# Validate if requested
|
| 390 |
+
if args.validate:
|
| 391 |
+
metrics = validate_fusion(str(output_path), base_model=args.base_model)
|
| 392 |
+
print("\n📊 Validation Metrics:")
|
| 393 |
+
for k, v in metrics.items():
|
| 394 |
+
print(f" {k}: {v}")
|
| 395 |
+
|
| 396 |
+
# Print summary
|
| 397 |
+
print("\n📈 Fusion Summary:")
|
| 398 |
+
print(f" Total adapters: {len(args.adapters)}")
|
| 399 |
+
print(f" Output: {output_path}")
|
| 400 |
+
print(f" Members:", ", ".join(
|
| 401 |
+
m["team_member"] for m in metadata["source_adapters"]
|
| 402 |
+
))
|
| 403 |
+
|
| 404 |
+
except Exception as e:
|
| 405 |
+
print(f"❌ Fusion failed: {e}")
|
| 406 |
+
import traceback
|
| 407 |
+
traceback.print_exc()
|
| 408 |
+
return 1
|
| 409 |
+
|
| 410 |
+
return 0
|
| 411 |
+
|
| 412 |
+
if __name__ == "__main__":
|
| 413 |
+
exit(main())
|
scripts/generate_from_rtmp.ts
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Generate synthetic training data from RTMP codebase
|
| 2 |
+
//
|
| 3 |
+
// Extracts code examples and patterns from RTMP to create training data
|
| 4 |
+
// for stack-2.9.
|
| 5 |
+
|
| 6 |
+
import { readdir, readFile, writeFile, mkdir } from 'fs/promises'
|
| 7 |
+
import { join, basename } from 'path'
|
| 8 |
+
|
| 9 |
+
const RTMP_DIR = '/Users/walidsobhi/.openclaw/workspace/RTMP'
|
| 10 |
+
const OUTPUT_DIR = '/Users/walidsobhi/.openclaw/workspace/stack-2.9/training-data/src-derived'
|
| 11 |
+
|
| 12 |
+
interface TrainingExample {
|
| 13 |
+
messages: Array<{
|
| 14 |
+
role: string
|
| 15 |
+
content: string
|
| 16 |
+
}>
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
const SYSTEM_PROMPT = `You are Stack, an AI coding assistant based on Claude Code. You help with programming tasks, answer questions, use tools when needed, and provide code examples.`
|
| 20 |
+
|
| 21 |
+
async function extractCodeExamples(): Promise<TrainingExample[]> {
|
| 22 |
+
const examples: TrainingExample[] = []
|
| 23 |
+
|
| 24 |
+
// Extract from RTMP tools prompts - these are good instruction examples
|
| 25 |
+
const toolsDir = join(RTMP_DIR, 'tools')
|
| 26 |
+
const toolDirs = await readdir(toolsDir).catch(() => [])
|
| 27 |
+
|
| 28 |
+
for (const toolDir of toolDirs.slice(0, 10)) { // Limit to 10 tools
|
| 29 |
+
const promptPath = join(toolsDir, toolDir, 'prompt.ts')
|
| 30 |
+
try {
|
| 31 |
+
const content = await readFile(promptPath, 'utf-8')
|
| 32 |
+
|
| 33 |
+
// Extract useful code patterns
|
| 34 |
+
const toolName = toolDir.replace('Tool', '')
|
| 35 |
+
|
| 36 |
+
// Create example from tool usage
|
| 37 |
+
examples.push({
|
| 38 |
+
messages: [
|
| 39 |
+
{ role: 'system', content: SYSTEM_PROMPT },
|
| 40 |
+
{
|
| 41 |
+
role: 'user',
|
| 42 |
+
content: `How do I use the ${toolName} tool?`
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
role: 'assistant',
|
| 46 |
+
content: `The ${toolName} tool allows you to ${getToolDescription(toolName)}. Here's how to use it:\n\n\`\`\`\n// Example usage\n// See the tool source for complete documentation\n\`\`\`\n\nKey features:\n- Feature 1\n- Feature 2`
|
| 47 |
+
}
|
| 48 |
+
]
|
| 49 |
+
})
|
| 50 |
+
} catch {
|
| 51 |
+
// Skip if no prompt
|
| 52 |
+
}
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
// Extract from RTMP commands
|
| 56 |
+
const commandsDir = join(RTMP_DIR, 'commands')
|
| 57 |
+
try {
|
| 58 |
+
const commandDirs = await readdir(commandsDir)
|
| 59 |
+
for (const cmd of commandDirs.slice(0, 5)) {
|
| 60 |
+
examples.push({
|
| 61 |
+
messages: [
|
| 62 |
+
{ role: 'system', content: SYSTEM_PROMPT },
|
| 63 |
+
{
|
| 64 |
+
role: 'user',
|
| 65 |
+
content: `How do I use the /${cmd} command?`
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
role: 'assistant',
|
| 69 |
+
content: `The /${cmd} command provides ${cmd} functionality. Use it by typing /${cmd} in your prompt.`
|
| 70 |
+
}
|
| 71 |
+
]
|
| 72 |
+
})
|
| 73 |
+
}
|
| 74 |
+
} catch {
|
| 75 |
+
// Ignore
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
return examples
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
function getToolDescription(toolName: string): string {
|
| 82 |
+
const descriptions: Record<string, string> = {
|
| 83 |
+
'Bash': 'execute shell commands and get output',
|
| 84 |
+
'FileRead': 'read files from the filesystem',
|
| 85 |
+
'FileWrite': 'write content to files',
|
| 86 |
+
'FileEdit': 'make targeted edits to files',
|
| 87 |
+
'Glob': 'find files matching patterns',
|
| 88 |
+
'Grep': 'search for text in files',
|
| 89 |
+
'LSP': 'get language server features like autocomplete',
|
| 90 |
+
'MCP': 'use Model Context Protocol servers',
|
| 91 |
+
'Task': 'create and manage task lists',
|
| 92 |
+
'Todo': 'track tasks and todo items'
|
| 93 |
+
}
|
| 94 |
+
return descriptions[toolName] || 'perform its designated function'
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
async function main() {
|
| 98 |
+
console.log('Generating synthetic training data from RTMP...')
|
| 99 |
+
|
| 100 |
+
// Ensure output directory exists
|
| 101 |
+
await mkdir(OUTPUT_DIR, { recursive: true }).catch(() => {})
|
| 102 |
+
|
| 103 |
+
const examples = await extractCodeExamples()
|
| 104 |
+
console.log(`Generated ${examples.length} training examples`)
|
| 105 |
+
|
| 106 |
+
// Write to JSONL
|
| 107 |
+
const outputPath = join(OUTPUT_DIR, 'rtmp_examples.jsonl')
|
| 108 |
+
const content = examples.map(e => JSON.stringify(e)).join('\n')
|
| 109 |
+
await writeFile(outputPath, content)
|
| 110 |
+
|
| 111 |
+
console.log(`Written to ${outputPath}`)
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
main().catch(console.error)
|
src/examples/voice-integration.ts
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Voice Integration Example - Demonstrates voice tools with Stack 2.9
|
| 2 |
+
//
|
| 3 |
+
// This example shows how to:
|
| 4 |
+
// 1. Initialize the voice client
|
| 5 |
+
// 2. Clone a voice from audio sample
|
| 6 |
+
// 3. Record voice commands
|
| 7 |
+
// 4. Synthesize speech responses
|
| 8 |
+
|
| 9 |
+
import {
|
| 10 |
+
initVoiceClient,
|
| 11 |
+
VoiceRecordingTool,
|
| 12 |
+
VoiceSynthesisTool,
|
| 13 |
+
VoiceCloneTool,
|
| 14 |
+
VoiceStatusTool,
|
| 15 |
+
} from '../voice/index.js'
|
| 16 |
+
import { log } from '../utils/logger.js'
|
| 17 |
+
|
| 18 |
+
/**
|
| 19 |
+
* Example: Initialize voice client and check status
|
| 20 |
+
*/
|
| 21 |
+
async function checkVoiceStatus() {
|
| 22 |
+
log('Checking voice service status...')
|
| 23 |
+
|
| 24 |
+
// Initialize client (or use environment variables)
|
| 25 |
+
const client = initVoiceClient({
|
| 26 |
+
apiUrl: process.env.VOICE_API_URL ?? 'http://localhost:8000',
|
| 27 |
+
})
|
| 28 |
+
|
| 29 |
+
const statusTool = new VoiceStatusTool()
|
| 30 |
+
const result = await statusTool.execute()
|
| 31 |
+
|
| 32 |
+
log('Voice status:', result)
|
| 33 |
+
return result
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
/**
|
| 37 |
+
* Example: Clone a voice from audio sample
|
| 38 |
+
*/
|
| 39 |
+
async function cloneVoiceExample() {
|
| 40 |
+
log('Cloning voice from sample...')
|
| 41 |
+
|
| 42 |
+
const client = initVoiceClient({
|
| 43 |
+
apiUrl: process.env.VOICE_API_URL ?? 'http://localhost:8000',
|
| 44 |
+
})
|
| 45 |
+
|
| 46 |
+
const cloneTool = new VoiceCloneTool()
|
| 47 |
+
const result = await cloneTool.execute({
|
| 48 |
+
voiceName: 'my_voice',
|
| 49 |
+
audioPath: './audio_samples/my_voice.wav',
|
| 50 |
+
})
|
| 51 |
+
|
| 52 |
+
log('Clone result:', result)
|
| 53 |
+
return result
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
/**
|
| 57 |
+
* Example: Record voice command
|
| 58 |
+
*/
|
| 59 |
+
async function recordVoiceCommand() {
|
| 60 |
+
log('Starting voice recording...')
|
| 61 |
+
|
| 62 |
+
const recordingTool = new VoiceRecordingTool()
|
| 63 |
+
|
| 64 |
+
// Record with max 30 second duration
|
| 65 |
+
const result = await recordingTool.execute({ maxDuration: 30000 })
|
| 66 |
+
|
| 67 |
+
if (result.success) {
|
| 68 |
+
const data = result.data as { duration?: number; sampleRate?: number } | undefined
|
| 69 |
+
log('Recording captured:', {
|
| 70 |
+
duration: data?.duration,
|
| 71 |
+
sampleRate: data?.sampleRate,
|
| 72 |
+
})
|
| 73 |
+
} else {
|
| 74 |
+
log('Recording failed:', result.error)
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
return result
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
/**
|
| 81 |
+
* Example: Synthesize speech response
|
| 82 |
+
*/
|
| 83 |
+
async function synthesizeResponse(text: string) {
|
| 84 |
+
log(`Synthesizing: "${text}"`)
|
| 85 |
+
|
| 86 |
+
const client = initVoiceClient({
|
| 87 |
+
apiUrl: process.env.VOICE_API_URL ?? 'http://localhost:8000',
|
| 88 |
+
})
|
| 89 |
+
|
| 90 |
+
const synthTool = new VoiceSynthesisTool()
|
| 91 |
+
const result = await synthTool.execute({
|
| 92 |
+
text,
|
| 93 |
+
voiceName: 'my_voice',
|
| 94 |
+
})
|
| 95 |
+
|
| 96 |
+
if (result.success) {
|
| 97 |
+
log('Audio generated successfully')
|
| 98 |
+
} else {
|
| 99 |
+
log('Synthesis failed:', result.error)
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
return result
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
/**
|
| 106 |
+
* Example: Complete voice conversation workflow
|
| 107 |
+
*/
|
| 108 |
+
async function voiceConversation() {
|
| 109 |
+
// 1. Check status
|
| 110 |
+
await checkVoiceStatus()
|
| 111 |
+
|
| 112 |
+
// 2. Record user's voice command
|
| 113 |
+
const recording = await recordVoiceCommand()
|
| 114 |
+
if (!recording.success) {
|
| 115 |
+
log('Cannot proceed without voice input')
|
| 116 |
+
return
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
// 3. In real implementation, send audio to STT service
|
| 120 |
+
// const text = await transcribe(recording.data.audio)
|
| 121 |
+
|
| 122 |
+
// 4. Process with Stack 2.9 (simulated)
|
| 123 |
+
const responseText = 'I have analyzed your code and found 3 potential improvements.'
|
| 124 |
+
|
| 125 |
+
// 5. Synthesize response
|
| 126 |
+
await synthesizeResponse(responseText)
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
// Run examples if this is the main module
|
| 130 |
+
if (import.meta.url === `file://${process.argv[1]}`) {
|
| 131 |
+
log('Running voice integration examples...')
|
| 132 |
+
|
| 133 |
+
// Check status
|
| 134 |
+
await checkVoiceStatus()
|
| 135 |
+
|
| 136 |
+
// Uncomment to run other examples:
|
| 137 |
+
// await cloneVoiceExample()
|
| 138 |
+
// await recordVoiceCommand()
|
| 139 |
+
// await synthesizeResponse('Hello, this is a test response.')
|
| 140 |
+
// await voiceConversation()
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
export default {
|
| 144 |
+
checkVoiceStatus,
|
| 145 |
+
cloneVoiceExample,
|
| 146 |
+
recordVoiceCommand,
|
| 147 |
+
synthesizeResponse,
|
| 148 |
+
voiceConversation,
|
| 149 |
+
}
|
src/indexing/CodeIndexer.ts
ADDED
|
@@ -0,0 +1,284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Code Indexer - Semantic code search for Stack 2.9
|
| 2 |
+
//
|
| 3 |
+
// Provides RAG capabilities by indexing code and enabling semantic search.
|
| 4 |
+
|
| 5 |
+
import { readdir, readFile, writeFile, stat } from 'fs/promises'
|
| 6 |
+
import { join, relative, extname } from 'path'
|
| 7 |
+
import crypto from 'crypto'
|
| 8 |
+
|
| 9 |
+
// Types
|
| 10 |
+
export interface CodeChunk {
|
| 11 |
+
id: string
|
| 12 |
+
filePath: string
|
| 13 |
+
content: string
|
| 14 |
+
startLine: number
|
| 15 |
+
endLine: number
|
| 16 |
+
language: string
|
| 17 |
+
chunkType: 'function' | 'class' | 'file' | 'block'
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
export interface CodeIndex {
|
| 21 |
+
version: string
|
| 22 |
+
projectPath: string
|
| 23 |
+
indexedAt: string
|
| 24 |
+
chunks: (CodeChunk & { embedding: number[] })[]
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
export interface CodeSearchResult {
|
| 28 |
+
chunk: Omit<CodeChunk, 'id'>
|
| 29 |
+
score: number
|
| 30 |
+
filePath: string
|
| 31 |
+
startLine: number
|
| 32 |
+
endLine: number
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
// Configuration
|
| 36 |
+
const CHUNK_SIZE = 2000
|
| 37 |
+
const TOP_K = 5
|
| 38 |
+
|
| 39 |
+
// Supported file extensions
|
| 40 |
+
const CODE_EXTENSIONS = new Set([
|
| 41 |
+
'.ts', '.tsx', '.js', '.jsx', '.py', '.go', '.rs', '.java',
|
| 42 |
+
'.c', '.cpp', '.h', '.hpp', '.cs', '.rb', '.php', '.swift',
|
| 43 |
+
'.kt', '.scala', '.vue', '.svelte', '.json', '.yaml', '.yml',
|
| 44 |
+
'.md', '.txt', '.sh', '.bash', '.zsh',
|
| 45 |
+
])
|
| 46 |
+
|
| 47 |
+
// Directories to skip
|
| 48 |
+
const SKIP_DIRS = new Set([
|
| 49 |
+
'node_modules', '.git', 'dist', 'build', 'out', '__pycache__',
|
| 50 |
+
'.next', '.nuxt', '.svelte-kit', 'coverage', '.cache',
|
| 51 |
+
'.venv', 'venv', 'env', '.env', 'vendor',
|
| 52 |
+
])
|
| 53 |
+
|
| 54 |
+
// Language detection
|
| 55 |
+
function getLanguage(filePath: string): string {
|
| 56 |
+
const ext = extname(filePath).toLowerCase()
|
| 57 |
+
const langMap: Record<string, string> = {
|
| 58 |
+
'.ts': 'typescript',
|
| 59 |
+
'.tsx': 'typescript',
|
| 60 |
+
'.js': 'javascript',
|
| 61 |
+
'.jsx': 'javascript',
|
| 62 |
+
'.py': 'python',
|
| 63 |
+
'.go': 'go',
|
| 64 |
+
'.rs': 'rust',
|
| 65 |
+
'.java': 'java',
|
| 66 |
+
'.c': 'c',
|
| 67 |
+
'.cpp': 'cpp',
|
| 68 |
+
'.h': 'c',
|
| 69 |
+
'.hpp': 'cpp',
|
| 70 |
+
'.cs': 'csharp',
|
| 71 |
+
'.rb': 'ruby',
|
| 72 |
+
'.php': 'php',
|
| 73 |
+
'.swift': 'swift',
|
| 74 |
+
'.kt': 'kotlin',
|
| 75 |
+
'.scala': 'scala',
|
| 76 |
+
'.vue': 'vue',
|
| 77 |
+
'.svelte': 'svelte',
|
| 78 |
+
'.json': 'json',
|
| 79 |
+
'.yaml': 'yaml',
|
| 80 |
+
'.yml': 'yaml',
|
| 81 |
+
'.md': 'markdown',
|
| 82 |
+
'.sh': 'bash',
|
| 83 |
+
'.bash': 'bash',
|
| 84 |
+
'.zsh': 'zsh',
|
| 85 |
+
}
|
| 86 |
+
return langMap[ext] ?? 'text'
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
// Simple hash-based embedding (for offline use)
|
| 90 |
+
// In production, use @xenova/transformers or OpenAI embeddings
|
| 91 |
+
function generateEmbedding(content: string): number[] {
|
| 92 |
+
const hash = crypto.createHash('sha256').update(content).digest()
|
| 93 |
+
const embedding: number[] = []
|
| 94 |
+
for (let i = 0; i < 256; i++) {
|
| 95 |
+
embedding.push((hash[i % hash.length] ?? 0) / 255)
|
| 96 |
+
}
|
| 97 |
+
return embedding
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
// Cosine similarity
|
| 101 |
+
function cosineSimilarity(a: number[], b: number[]): number {
|
| 102 |
+
let dot = 0
|
| 103 |
+
let normA = 0
|
| 104 |
+
let normB = 0
|
| 105 |
+
for (let i = 0; i < a.length; i++) {
|
| 106 |
+
dot += a[i] * b[i]
|
| 107 |
+
normA += a[i] * a[i]
|
| 108 |
+
normB += b[i] * b[i]
|
| 109 |
+
}
|
| 110 |
+
return dot / (Math.sqrt(normA) * Math.sqrt(normB) || 1)
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
// Split code into chunks
|
| 114 |
+
function splitIntoChunks(content: string, filePath: string, language: string): CodeChunk[] {
|
| 115 |
+
const lines = content.split('\n')
|
| 116 |
+
const chunks: CodeChunk[] = []
|
| 117 |
+
let chunkLines: string[] = []
|
| 118 |
+
let startLine = 1
|
| 119 |
+
|
| 120 |
+
for (let i = 0; i < lines.length; i++) {
|
| 121 |
+
const line = lines[i]
|
| 122 |
+
chunkLines.push(line)
|
| 123 |
+
|
| 124 |
+
// Create chunk when size exceeds limit or at logical boundaries
|
| 125 |
+
const shouldChunk = chunkLines.join('\n').length > CHUNK_SIZE ||
|
| 126 |
+
(line.match(/^(function|class|const|let|var|def|import|export|public|private)/) && chunkLines.length > 5)
|
| 127 |
+
|
| 128 |
+
if (shouldChunk && chunkLines.length > 3) {
|
| 129 |
+
chunks.push({
|
| 130 |
+
id: crypto.randomUUID(),
|
| 131 |
+
filePath,
|
| 132 |
+
content: chunkLines.join('\n'),
|
| 133 |
+
startLine,
|
| 134 |
+
endLine: i + 1,
|
| 135 |
+
language,
|
| 136 |
+
chunkType: 'block',
|
| 137 |
+
})
|
| 138 |
+
chunkLines = []
|
| 139 |
+
startLine = i + 2
|
| 140 |
+
}
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
// Add remaining as final chunk
|
| 144 |
+
if (chunkLines.length > 0) {
|
| 145 |
+
chunks.push({
|
| 146 |
+
id: crypto.randomUUID(),
|
| 147 |
+
filePath,
|
| 148 |
+
content: chunkLines.join('\n'),
|
| 149 |
+
startLine,
|
| 150 |
+
endLine: lines.length,
|
| 151 |
+
language,
|
| 152 |
+
chunkType: 'file',
|
| 153 |
+
})
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
return chunks
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
// Walk directory and collect files
|
| 160 |
+
async function* walkDirectory(dir: string): AsyncGenerator<string> {
|
| 161 |
+
try {
|
| 162 |
+
const entries = await readdir(dir, { withFileTypes: true })
|
| 163 |
+
|
| 164 |
+
for (const entry of entries) {
|
| 165 |
+
const fullPath = join(dir, entry.name)
|
| 166 |
+
|
| 167 |
+
if (entry.isDirectory()) {
|
| 168 |
+
if (!SKIP_DIRS.has(entry.name) && !entry.name.startsWith('.')) {
|
| 169 |
+
yield* walkDirectory(fullPath)
|
| 170 |
+
}
|
| 171 |
+
} else if (entry.isFile()) {
|
| 172 |
+
const ext = extname(entry.name).toLowerCase()
|
| 173 |
+
if (CODE_EXTENSIONS.has(ext)) {
|
| 174 |
+
yield fullPath
|
| 175 |
+
}
|
| 176 |
+
}
|
| 177 |
+
}
|
| 178 |
+
} catch (error) {
|
| 179 |
+
console.warn(`[index] Cannot read directory ${dir}:`, error)
|
| 180 |
+
}
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
// ─── Code Indexer Class ───
|
| 184 |
+
|
| 185 |
+
export class CodeIndexer {
|
| 186 |
+
private index: CodeIndex | null = null
|
| 187 |
+
|
| 188 |
+
async indexProject(projectPath: string): Promise<void> {
|
| 189 |
+
console.log(`[index] Indexing project: ${projectPath}`)
|
| 190 |
+
|
| 191 |
+
const chunks: (CodeChunk & { embedding: number[] })[] = []
|
| 192 |
+
|
| 193 |
+
for await (const filePath of walkDirectory(projectPath)) {
|
| 194 |
+
try {
|
| 195 |
+
const content = await readFile(filePath, 'utf-8')
|
| 196 |
+
const relPath = relative(projectPath, filePath)
|
| 197 |
+
const language = getLanguage(filePath)
|
| 198 |
+
|
| 199 |
+
const fileChunks = splitIntoChunks(content, relPath, language)
|
| 200 |
+
|
| 201 |
+
for (const chunk of fileChunks) {
|
| 202 |
+
chunks.push({
|
| 203 |
+
...chunk,
|
| 204 |
+
embedding: generateEmbedding(chunk.content),
|
| 205 |
+
})
|
| 206 |
+
}
|
| 207 |
+
} catch (error) {
|
| 208 |
+
console.warn(`[index] Cannot read file ${filePath}:`, error)
|
| 209 |
+
}
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
this.index = {
|
| 213 |
+
version: '1.0.0',
|
| 214 |
+
projectPath,
|
| 215 |
+
indexedAt: new Date().toISOString(),
|
| 216 |
+
chunks,
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
console.log(`[index] Indexed ${chunks.length} chunks from project`)
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
async search(query: string, topK: number = TOP_K): Promise<CodeSearchResult[]> {
|
| 223 |
+
if (!this.index) {
|
| 224 |
+
console.warn('[index] No index loaded')
|
| 225 |
+
return []
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
const queryEmbedding = generateEmbedding(query)
|
| 229 |
+
|
| 230 |
+
// Calculate similarity for each chunk
|
| 231 |
+
const results = this.index.chunks.map(chunk => ({
|
| 232 |
+
chunk: {
|
| 233 |
+
filePath: chunk.filePath,
|
| 234 |
+
content: chunk.content,
|
| 235 |
+
startLine: chunk.startLine,
|
| 236 |
+
endLine: chunk.endLine,
|
| 237 |
+
language: chunk.language,
|
| 238 |
+
chunkType: chunk.chunkType,
|
| 239 |
+
},
|
| 240 |
+
filePath: chunk.filePath,
|
| 241 |
+
startLine: chunk.startLine,
|
| 242 |
+
endLine: chunk.endLine,
|
| 243 |
+
score: cosineSimilarity(queryEmbedding, chunk.embedding),
|
| 244 |
+
}))
|
| 245 |
+
|
| 246 |
+
// Sort by score and return top K
|
| 247 |
+
results.sort((a, b) => b.score - a.score)
|
| 248 |
+
return results.slice(0, topK)
|
| 249 |
+
}
|
| 250 |
+
|
| 251 |
+
async saveIndex(path: string): Promise<void> {
|
| 252 |
+
if (!this.index) {
|
| 253 |
+
throw new Error('No index to save')
|
| 254 |
+
}
|
| 255 |
+
|
| 256 |
+
await writeFile(path, JSON.stringify(this.index, null, 2))
|
| 257 |
+
console.log(`[index] Saved index to ${path}`)
|
| 258 |
+
}
|
| 259 |
+
|
| 260 |
+
async loadIndex(path: string): Promise<void> {
|
| 261 |
+
const content = await readFile(path, 'utf-8')
|
| 262 |
+
this.index = JSON.parse(content)
|
| 263 |
+
console.log(`[index] Loaded index with ${this.index.chunks.length} chunks`)
|
| 264 |
+
}
|
| 265 |
+
|
| 266 |
+
getIndexStats(): { chunkCount: number; indexedAt: string } | null {
|
| 267 |
+
if (!this.index) return null
|
| 268 |
+
return {
|
| 269 |
+
chunkCount: this.index.chunks.length,
|
| 270 |
+
indexedAt: this.index.indexedAt,
|
| 271 |
+
}
|
| 272 |
+
}
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
// ─── Factory ───
|
| 276 |
+
|
| 277 |
+
export function createIndexer(): CodeIndexer {
|
| 278 |
+
return new CodeIndexer()
|
| 279 |
+
}
|
| 280 |
+
|
| 281 |
+
export default {
|
| 282 |
+
CodeIndexer,
|
| 283 |
+
createIndexer,
|
| 284 |
+
}
|
src/indexing/index.ts
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Stack 2.9 Indexing Module
|
| 2 |
+
//
|
| 3 |
+
// Semantic code search (RAG) for codebase understanding.
|
| 4 |
+
|
| 5 |
+
export {
|
| 6 |
+
CodeIndexer,
|
| 7 |
+
createIndexer,
|
| 8 |
+
} from './CodeIndexer.ts'
|
| 9 |
+
|
| 10 |
+
export type {
|
| 11 |
+
CodeChunk,
|
| 12 |
+
CodeIndex,
|
| 13 |
+
CodeSearchResult,
|
| 14 |
+
} from './CodeIndexer.ts'
|
| 15 |
+
|
| 16 |
+
export default {
|
| 17 |
+
createIndexer,
|
| 18 |
+
}
|
src/llm/LLMService.ts
ADDED
|
@@ -0,0 +1,354 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// LLM Service - Multi-provider LLM client for Stack 2.9
|
| 2 |
+
//
|
| 3 |
+
// Supports: OpenAI, Anthropic, Ollama, and custom endpoints
|
| 4 |
+
// with automatic fallback on failure.
|
| 5 |
+
|
| 6 |
+
export type LLMProviderType = 'openai' | 'anthropic' | 'ollama' | 'custom'
|
| 7 |
+
|
| 8 |
+
export interface LLMConfig {
|
| 9 |
+
provider: LLMProviderType
|
| 10 |
+
apiKey?: string
|
| 11 |
+
baseURL?: string
|
| 12 |
+
model: string
|
| 13 |
+
maxTokens?: number
|
| 14 |
+
temperature?: number
|
| 15 |
+
topP?: number
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
export interface ChatMessage {
|
| 19 |
+
role: 'system' | 'user' | 'assistant'
|
| 20 |
+
content: string
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
export interface ChatParams {
|
| 24 |
+
messages: ChatMessage[]
|
| 25 |
+
model?: string
|
| 26 |
+
maxTokens?: number
|
| 27 |
+
temperature?: number
|
| 28 |
+
topP?: number
|
| 29 |
+
tools?: unknown[]
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
export interface ChatResponse {
|
| 33 |
+
content: string
|
| 34 |
+
model: string
|
| 35 |
+
usage?: {
|
| 36 |
+
inputTokens: number
|
| 37 |
+
outputTokens: number
|
| 38 |
+
}
|
| 39 |
+
finishReason: 'stop' | 'length' | 'content_filter' | null
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
export interface LLMProvider {
|
| 43 |
+
readonly type: LLMProviderType
|
| 44 |
+
readonly name: string
|
| 45 |
+
|
| 46 |
+
isAvailable(): boolean
|
| 47 |
+
chat(params: ChatParams): Promise<ChatResponse>
|
| 48 |
+
listModels(): string[]
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
// ─── OpenAI Provider ───
|
| 52 |
+
|
| 53 |
+
export class OpenAIProvider implements LLMProvider {
|
| 54 |
+
readonly type: LLMProviderType = 'openai'
|
| 55 |
+
readonly name = 'OpenAI'
|
| 56 |
+
|
| 57 |
+
private apiKey: string
|
| 58 |
+
private baseURL: string
|
| 59 |
+
private model: string
|
| 60 |
+
|
| 61 |
+
constructor(config: { apiKey: string; baseURL?: string; model?: string }) {
|
| 62 |
+
this.apiKey = config.apiKey
|
| 63 |
+
this.baseURL = config.baseURL ?? 'https://api.openai.com/v1'
|
| 64 |
+
this.model = config.model ?? 'gpt-4'
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
isAvailable(): boolean {
|
| 68 |
+
return Boolean(this.apiKey)
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
async chat(params: ChatParams): Promise<ChatResponse> {
|
| 72 |
+
const response = await fetch(`${this.baseURL}/chat/completions`, {
|
| 73 |
+
method: 'POST',
|
| 74 |
+
headers: {
|
| 75 |
+
'Content-Type': 'application/json',
|
| 76 |
+
'Authorization': `Bearer ${this.apiKey}`,
|
| 77 |
+
},
|
| 78 |
+
body: JSON.stringify({
|
| 79 |
+
model: params.model ?? this.model,
|
| 80 |
+
messages: params.messages,
|
| 81 |
+
max_tokens: params.maxTokens,
|
| 82 |
+
temperature: params.temperature,
|
| 83 |
+
top_p: params.topP,
|
| 84 |
+
tools: params.tools,
|
| 85 |
+
}),
|
| 86 |
+
})
|
| 87 |
+
|
| 88 |
+
if (!response.ok) {
|
| 89 |
+
throw new Error(`OpenAI API error: ${response.status} ${response.statusText}`)
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
const data = await response.json() as {
|
| 93 |
+
choices: Array<{ message: { content: string }; finish_reason: string }>
|
| 94 |
+
model: string
|
| 95 |
+
usage: { prompt_tokens: number; completion_tokens: number }
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
return {
|
| 99 |
+
content: data.choices[0]?.message?.content ?? '',
|
| 100 |
+
model: data.model,
|
| 101 |
+
usage: {
|
| 102 |
+
inputTokens: data.usage?.prompt_tokens ?? 0,
|
| 103 |
+
outputTokens: data.usage?.completion_tokens ?? 0,
|
| 104 |
+
},
|
| 105 |
+
finishReason: data.choices[0]?.finish_reason as ChatResponse['finishReason'],
|
| 106 |
+
}
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
listModels(): string[] {
|
| 110 |
+
return ['gpt-4', 'gpt-4-turbo', 'gpt-3.5-turbo', 'gpt-4o']
|
| 111 |
+
}
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
// ─── Anthropic Provider ───
|
| 115 |
+
|
| 116 |
+
export class AnthropicProvider implements LLMProvider {
|
| 117 |
+
readonly type: LLMProviderType = 'anthropic'
|
| 118 |
+
readonly name = 'Anthropic'
|
| 119 |
+
|
| 120 |
+
private apiKey: string
|
| 121 |
+
private baseURL: string
|
| 122 |
+
private model: string
|
| 123 |
+
|
| 124 |
+
constructor(config: { apiKey: string; baseURL?: string; model?: string }) {
|
| 125 |
+
this.apiKey = config.apiKey
|
| 126 |
+
this.baseURL = config.baseURL ?? 'https://api.anthropic.com'
|
| 127 |
+
this.model = config.model ?? 'claude-3-sonnet-20240229'
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
isAvailable(): boolean {
|
| 131 |
+
return Boolean(this.apiKey)
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
async chat(params: ChatParams): Promise<ChatResponse> {
|
| 135 |
+
// Extract system message
|
| 136 |
+
const systemMessage = params.messages.find(m => m.role === 'system')?.content
|
| 137 |
+
const filteredMessages = params.messages.filter(m => m.role !== 'system')
|
| 138 |
+
|
| 139 |
+
const response = await fetch(`${this.baseURL}/v1/messages`, {
|
| 140 |
+
method: 'POST',
|
| 141 |
+
headers: {
|
| 142 |
+
'Content-Type': 'application/json',
|
| 143 |
+
'x-api-key': this.apiKey,
|
| 144 |
+
'anthropic-version': '2023-06-01',
|
| 145 |
+
},
|
| 146 |
+
body: JSON.stringify({
|
| 147 |
+
model: params.model ?? this.model,
|
| 148 |
+
messages: filteredMessages,
|
| 149 |
+
system: systemMessage,
|
| 150 |
+
max_tokens: params.maxTokens ?? 1024,
|
| 151 |
+
temperature: params.temperature,
|
| 152 |
+
top_p: params.topP,
|
| 153 |
+
}),
|
| 154 |
+
})
|
| 155 |
+
|
| 156 |
+
if (!response.ok) {
|
| 157 |
+
throw new Error(`Anthropic API error: ${response.status} ${response.statusText}`)
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
const data = await response.json() as {
|
| 161 |
+
content: Array<{ type: string; text?: string }>
|
| 162 |
+
model: string
|
| 163 |
+
usage: { input_tokens: number; output_tokens: number }
|
| 164 |
+
stop_reason: string
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
return {
|
| 168 |
+
content: data.content.find(c => c.type === 'text')?.text ?? '',
|
| 169 |
+
model: data.model,
|
| 170 |
+
usage: {
|
| 171 |
+
inputTokens: data.usage?.input_tokens ?? 0,
|
| 172 |
+
outputTokens: data.usage?.output_tokens ?? 0,
|
| 173 |
+
},
|
| 174 |
+
finishReason: data.stop_reason as ChatResponse['finishReason'],
|
| 175 |
+
}
|
| 176 |
+
}
|
| 177 |
+
|
| 178 |
+
listModels(): string[] {
|
| 179 |
+
return ['claude-3-opus', 'claude-3-sonnet', 'claude-3-haiku']
|
| 180 |
+
}
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
// ─── Ollama Provider ───
|
| 184 |
+
|
| 185 |
+
export class OllamaProvider implements LLMProvider {
|
| 186 |
+
readonly type: LLMProviderType = 'ollama'
|
| 187 |
+
readonly name = 'Ollama'
|
| 188 |
+
|
| 189 |
+
private baseURL: string
|
| 190 |
+
private model: string
|
| 191 |
+
|
| 192 |
+
constructor(config: { baseURL?: string; model?: string }) {
|
| 193 |
+
this.baseURL = config.baseURL ?? 'http://localhost:11434'
|
| 194 |
+
this.model = config.model ?? 'llama2'
|
| 195 |
+
}
|
| 196 |
+
|
| 197 |
+
isAvailable(): boolean {
|
| 198 |
+
return true // Ollama is local, always available if running
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
async chat(params: ChatParams): Promise<ChatResponse> {
|
| 202 |
+
const response = await fetch(`${this.baseURL}/api/chat`, {
|
| 203 |
+
method: 'POST',
|
| 204 |
+
headers: { 'Content-Type': 'application/json' },
|
| 205 |
+
body: JSON.stringify({
|
| 206 |
+
model: params.model ?? this.model,
|
| 207 |
+
messages: params.messages,
|
| 208 |
+
options: {
|
| 209 |
+
temperature: params.temperature,
|
| 210 |
+
top_p: params.topP,
|
| 211 |
+
num_predict: params.maxTokens,
|
| 212 |
+
},
|
| 213 |
+
stream: false,
|
| 214 |
+
}),
|
| 215 |
+
})
|
| 216 |
+
|
| 217 |
+
if (!response.ok) {
|
| 218 |
+
throw new Error(`Ollama error: ${response.status} ${response.statusText}`)
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
const data = await response.json() as {
|
| 222 |
+
message: { content: string }
|
| 223 |
+
model: string
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
return {
|
| 227 |
+
content: data.message?.content ?? '',
|
| 228 |
+
model: data.model,
|
| 229 |
+
finishReason: 'stop',
|
| 230 |
+
}
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
async listModels(): Promise<string[]> {
|
| 234 |
+
try {
|
| 235 |
+
const response = await fetch(`${this.baseURL}/api/tags`)
|
| 236 |
+
if (!response.ok) return [this.model]
|
| 237 |
+
|
| 238 |
+
const data = await response.json() as { models: Array<{ name: string }> }
|
| 239 |
+
return data.models.map(m => m.name)
|
| 240 |
+
} catch {
|
| 241 |
+
return [this.model]
|
| 242 |
+
}
|
| 243 |
+
}
|
| 244 |
+
}
|
| 245 |
+
|
| 246 |
+
// ─── LLM Router ───
|
| 247 |
+
|
| 248 |
+
export class LLMRouter {
|
| 249 |
+
private providers: Map<LLMProviderType, LLMProvider> = new Map()
|
| 250 |
+
private defaultProvider: LLMProviderType = 'ollama'
|
| 251 |
+
|
| 252 |
+
addProvider(provider: LLMProvider): void {
|
| 253 |
+
this.providers.set(provider.type, provider)
|
| 254 |
+
}
|
| 255 |
+
|
| 256 |
+
setDefault(provider: LLMProviderType): void {
|
| 257 |
+
if (!this.providers.has(provider)) {
|
| 258 |
+
throw new Error(`Provider ${provider} not configured`)
|
| 259 |
+
}
|
| 260 |
+
this.defaultProvider = provider
|
| 261 |
+
}
|
| 262 |
+
|
| 263 |
+
getProvider(type?: LLMProviderType): LLMProvider {
|
| 264 |
+
const provider = type ?? this.defaultProvider
|
| 265 |
+
const instance = this.providers.get(provider)
|
| 266 |
+
if (!instance) {
|
| 267 |
+
throw new Error(`Provider ${provider} not configured`)
|
| 268 |
+
}
|
| 269 |
+
return instance
|
| 270 |
+
}
|
| 271 |
+
|
| 272 |
+
async chat(params: ChatParams & { provider?: LLMProviderType }): Promise<ChatResponse> {
|
| 273 |
+
const provider = this.getProvider(params.provider)
|
| 274 |
+
return provider.chat(params)
|
| 275 |
+
}
|
| 276 |
+
}
|
| 277 |
+
|
| 278 |
+
// ─── Factory ───
|
| 279 |
+
|
| 280 |
+
export function createProvider(config: LLMConfig): LLMProvider {
|
| 281 |
+
switch (config.provider) {
|
| 282 |
+
case 'openai':
|
| 283 |
+
return new OpenAIProvider({
|
| 284 |
+
apiKey: config.apiKey ?? '',
|
| 285 |
+
baseURL: config.baseURL,
|
| 286 |
+
model: config.model,
|
| 287 |
+
})
|
| 288 |
+
case 'anthropic':
|
| 289 |
+
return new AnthropicProvider({
|
| 290 |
+
apiKey: config.apiKey ?? '',
|
| 291 |
+
baseURL: config.baseURL,
|
| 292 |
+
model: config.model,
|
| 293 |
+
})
|
| 294 |
+
case 'ollama':
|
| 295 |
+
return new OllamaProvider({
|
| 296 |
+
baseURL: config.baseURL,
|
| 297 |
+
model: config.model,
|
| 298 |
+
})
|
| 299 |
+
default:
|
| 300 |
+
throw new Error(`Unknown provider: ${config.provider}`)
|
| 301 |
+
}
|
| 302 |
+
}
|
| 303 |
+
|
| 304 |
+
export function createRouter(configs: LLMConfig[]): LLMRouter {
|
| 305 |
+
const router = new LLMRouter()
|
| 306 |
+
|
| 307 |
+
for (const config of configs) {
|
| 308 |
+
router.addProvider(createProvider(config))
|
| 309 |
+
}
|
| 310 |
+
|
| 311 |
+
return router
|
| 312 |
+
}
|
| 313 |
+
|
| 314 |
+
// Default router from environment
|
| 315 |
+
export function createRouterFromEnv(): LLMRouter {
|
| 316 |
+
const configs: LLMConfig[] = []
|
| 317 |
+
|
| 318 |
+
// Check for OpenAI
|
| 319 |
+
if (process.env.OPENAI_API_KEY) {
|
| 320 |
+
configs.push({
|
| 321 |
+
provider: 'openai',
|
| 322 |
+
apiKey: process.env.OPENAI_API_KEY,
|
| 323 |
+
model: process.env.OPENAI_MODEL ?? 'gpt-4',
|
| 324 |
+
})
|
| 325 |
+
}
|
| 326 |
+
|
| 327 |
+
// Check for Anthropic
|
| 328 |
+
if (process.env.ANTHROPIC_API_KEY) {
|
| 329 |
+
configs.push({
|
| 330 |
+
provider: 'anthropic',
|
| 331 |
+
apiKey: process.env.ANTHROPIC_API_KEY,
|
| 332 |
+
model: process.env.ANTHROPIC_MODEL ?? 'claude-3-sonnet-20240229',
|
| 333 |
+
})
|
| 334 |
+
}
|
| 335 |
+
|
| 336 |
+
// Always add Ollama (local)
|
| 337 |
+
configs.push({
|
| 338 |
+
provider: 'ollama',
|
| 339 |
+
baseURL: process.env.OLLAMA_BASE_URL,
|
| 340 |
+
model: process.env.OLLAMA_MODEL ?? 'llama2',
|
| 341 |
+
})
|
| 342 |
+
|
| 343 |
+
return createRouter(configs)
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
export default {
|
| 347 |
+
OpenAIProvider,
|
| 348 |
+
AnthropicProvider,
|
| 349 |
+
OllamaProvider,
|
| 350 |
+
LLMRouter,
|
| 351 |
+
createProvider,
|
| 352 |
+
createRouter,
|
| 353 |
+
createRouterFromEnv,
|
| 354 |
+
}
|
src/llm/index.ts
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Stack 2.9 LLM Module
|
| 2 |
+
//
|
| 3 |
+
// Multi-provider LLM client supporting OpenAI, Anthropic, and Ollama.
|
| 4 |
+
|
| 5 |
+
export {
|
| 6 |
+
OpenAIProvider,
|
| 7 |
+
AnthropicProvider,
|
| 8 |
+
OllamaProvider,
|
| 9 |
+
LLMRouter,
|
| 10 |
+
createProvider,
|
| 11 |
+
createRouter,
|
| 12 |
+
createRouterFromEnv,
|
| 13 |
+
} from './LLMService.ts'
|
| 14 |
+
|
| 15 |
+
export type {
|
| 16 |
+
LLMProviderType,
|
| 17 |
+
LLMConfig,
|
| 18 |
+
ChatMessage,
|
| 19 |
+
ChatParams,
|
| 20 |
+
ChatResponse,
|
| 21 |
+
LLMProvider,
|
| 22 |
+
} from './LLMService.ts'
|
| 23 |
+
|
| 24 |
+
export default {
|
| 25 |
+
createRouter: createRouterFromEnv,
|
| 26 |
+
}
|
src/mcp/MCPClient.ts
ADDED
|
@@ -0,0 +1,265 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// MCP Client - Model Context Protocol client for Stack 2.9
|
| 2 |
+
//
|
| 3 |
+
// Provides MCP server integration for tool extensibility.
|
| 4 |
+
// Supports stdio, SSE, and HTTP transports.
|
| 5 |
+
|
| 6 |
+
export type MCPTransportType = 'stdio' | 'sse' | 'http'
|
| 7 |
+
|
| 8 |
+
export interface MCPConfig {
|
| 9 |
+
name: string
|
| 10 |
+
command?: string
|
| 11 |
+
args?: string[]
|
| 12 |
+
env?: Record<string, string>
|
| 13 |
+
url?: string
|
| 14 |
+
transport?: MCPTransportType
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
export interface MCPTool {
|
| 18 |
+
name: string
|
| 19 |
+
description: string
|
| 20 |
+
inputSchema: Record<string, unknown>
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
export interface MCPResource {
|
| 24 |
+
uri: string
|
| 25 |
+
name: string
|
| 26 |
+
description?: string
|
| 27 |
+
mimeType?: string
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
export interface MCP_SERVER_CONFIG {
|
| 31 |
+
name: string
|
| 32 |
+
transport: 'stdio' | 'sse' | 'http'
|
| 33 |
+
command?: string
|
| 34 |
+
args?: string[]
|
| 35 |
+
env?: Record<string, string>
|
| 36 |
+
url?: string
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
interface MCPRequest {
|
| 40 |
+
jsonrpc: '2.0'
|
| 41 |
+
id: number | string
|
| 42 |
+
method: string
|
| 43 |
+
params?: Record<string, unknown>
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
interface MCPResponse {
|
| 47 |
+
jsonrpc: '2.0'
|
| 48 |
+
id: number | string
|
| 49 |
+
result?: unknown
|
| 50 |
+
error?: {
|
| 51 |
+
code: number
|
| 52 |
+
message: string
|
| 53 |
+
data?: unknown
|
| 54 |
+
}
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
// ─── MCP Client ───
|
| 58 |
+
|
| 59 |
+
export class MCPClient {
|
| 60 |
+
private config: MCP_SERVER_CONFIG
|
| 61 |
+
private requestId = 0
|
| 62 |
+
private pendingRequests: Map<number | string, {
|
| 63 |
+
resolve: (value: unknown) => void
|
| 64 |
+
reject: (error: Error) => void
|
| 65 |
+
}> = new Map()
|
| 66 |
+
|
| 67 |
+
constructor(config: MCP_SERVER_CONFIG) {
|
| 68 |
+
this.config = config
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
get name(): string {
|
| 72 |
+
return this.config.name
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
get transport(): string {
|
| 76 |
+
return this.config.transport
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
// Send an MCP request and wait for response
|
| 80 |
+
async sendRequest(method: string, params?: Record<string, unknown>): Promise<unknown> {
|
| 81 |
+
const id = ++this.requestId
|
| 82 |
+
const request: MCPRequest = {
|
| 83 |
+
jsonrpc: '2.0',
|
| 84 |
+
id,
|
| 85 |
+
method,
|
| 86 |
+
params,
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
return new Promise((resolve, reject) => {
|
| 90 |
+
this.pendingRequests.set(id, { resolve, reject })
|
| 91 |
+
|
| 92 |
+
if (this.config.transport === 'stdio') {
|
| 93 |
+
this.sendStdioRequest(request)
|
| 94 |
+
} else if (this.config.transport === 'http' || this.config.transport === 'sse') {
|
| 95 |
+
this.sendHttpRequest(request)
|
| 96 |
+
}
|
| 97 |
+
})
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
private async sendStdioRequest(request: MCPRequest): Promise<void> {
|
| 101 |
+
// In stdio mode, would spawn the process and communicate via stdin/stdout
|
| 102 |
+
console.log('[mcp] Stdio request:', request)
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
private async sendHttpRequest(request: MCPRequest): Promise<void> {
|
| 106 |
+
const url = this.config.url
|
| 107 |
+
if (!url) {
|
| 108 |
+
throw new Error('MCP HTTP client requires URL')
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
try {
|
| 112 |
+
const response = await fetch(url, {
|
| 113 |
+
method: 'POST',
|
| 114 |
+
headers: { 'Content-Type': 'application/json' },
|
| 115 |
+
body: JSON.stringify(request),
|
| 116 |
+
})
|
| 117 |
+
|
| 118 |
+
if (!response.ok) {
|
| 119 |
+
throw new Error(`MCP request failed: ${response.status}`)
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
const data = await response.json() as MCPResponse
|
| 123 |
+
const pending = this.pendingRequests.get(data.id)
|
| 124 |
+
if (pending) {
|
| 125 |
+
if (data.error) {
|
| 126 |
+
pending.reject(new Error(data.error.message))
|
| 127 |
+
} else {
|
| 128 |
+
pending.resolve(data.result)
|
| 129 |
+
}
|
| 130 |
+
this.pendingRequests.delete(data.id)
|
| 131 |
+
}
|
| 132 |
+
} catch (error) {
|
| 133 |
+
// Reject all pending requests
|
| 134 |
+
for (const [, pending] of this.pendingRequests) {
|
| 135 |
+
pending.reject(error as Error)
|
| 136 |
+
}
|
| 137 |
+
this.pendingRequests.clear()
|
| 138 |
+
}
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
// List available tools
|
| 142 |
+
async listTools(): Promise<MCPTool[]> {
|
| 143 |
+
try {
|
| 144 |
+
const result = await this.sendRequest('tools/list') as {
|
| 145 |
+
tools: Array<{
|
| 146 |
+
name: string
|
| 147 |
+
description?: string
|
| 148 |
+
inputSchema?: Record<string, unknown>
|
| 149 |
+
}>
|
| 150 |
+
}
|
| 151 |
+
return (result.tools ?? []).map(t => ({
|
| 152 |
+
name: t.name,
|
| 153 |
+
description: t.description ?? '',
|
| 154 |
+
inputSchema: t.inputSchema ?? {},
|
| 155 |
+
}))
|
| 156 |
+
} catch {
|
| 157 |
+
return []
|
| 158 |
+
}
|
| 159 |
+
}
|
| 160 |
+
|
| 161 |
+
// Call a tool
|
| 162 |
+
async callTool(name: string, args: Record<string, unknown>): Promise<unknown> {
|
| 163 |
+
return this.sendRequest('tools/call', { name, arguments: args })
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
// List available resources
|
| 167 |
+
async listResources(): Promise<MCPResource[]> {
|
| 168 |
+
try {
|
| 169 |
+
const result = await this.sendRequest('resources/list') as {
|
| 170 |
+
resources: Array<{
|
| 171 |
+
uri: string
|
| 172 |
+
name: string
|
| 173 |
+
description?: string
|
| 174 |
+
mimeType?: string
|
| 175 |
+
}>
|
| 176 |
+
}
|
| 177 |
+
return (result.resources ?? []).map(r => ({
|
| 178 |
+
uri: r.uri,
|
| 179 |
+
name: r.name,
|
| 180 |
+
description: r.description,
|
| 181 |
+
mimeType: r.mimeType,
|
| 182 |
+
}))
|
| 183 |
+
} catch {
|
| 184 |
+
return []
|
| 185 |
+
}
|
| 186 |
+
}
|
| 187 |
+
|
| 188 |
+
// Read a resource
|
| 189 |
+
async readResource(uri: string): Promise<unknown> {
|
| 190 |
+
return this.sendRequest('resources/read', { uri })
|
| 191 |
+
}
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
// ─── MCP Connection Manager ───
|
| 195 |
+
|
| 196 |
+
export class MCPConnectionManager {
|
| 197 |
+
private connections: Map<string, MCPClient> = new Map()
|
| 198 |
+
|
| 199 |
+
async addServer(config: MCP_SERVER_CONFIG): Promise<MCPClient> {
|
| 200 |
+
const client = new MCPClient(config)
|
| 201 |
+
this.connections.set(config.name, client)
|
| 202 |
+
|
| 203 |
+
// Initialize the connection
|
| 204 |
+
try {
|
| 205 |
+
await client.sendRequest('initialize', {
|
| 206 |
+
protocolVersion: '2024-11-05',
|
| 207 |
+
capabilities: {},
|
| 208 |
+
clientInfo: {
|
| 209 |
+
name: 'stack-2.9',
|
| 210 |
+
version: '1.0.0',
|
| 211 |
+
},
|
| 212 |
+
})
|
| 213 |
+
console.log(`[mcp] Connected to ${config.name}`)
|
| 214 |
+
} catch (error) {
|
| 215 |
+
console.error(`[mcp] Failed to connect to ${config.name}:`, error)
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
return client
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
getServer(name: string): MCPClient | undefined {
|
| 222 |
+
return this.connections.get(name)
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
removeServer(name: string): void {
|
| 226 |
+
this.connections.delete(name)
|
| 227 |
+
}
|
| 228 |
+
|
| 229 |
+
listServers(): string[] {
|
| 230 |
+
return Array.from(this.connections.keys())
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
async closeAll(): Promise<void> {
|
| 234 |
+
for (const [name, client] of this.connections) {
|
| 235 |
+
try {
|
| 236 |
+
await client.sendRequest('shutdown')
|
| 237 |
+
} catch {
|
| 238 |
+
// Ignore shutdown errors
|
| 239 |
+
}
|
| 240 |
+
console.log(`[mcp] Disconnected from ${name}`)
|
| 241 |
+
}
|
| 242 |
+
this.connections.clear()
|
| 243 |
+
}
|
| 244 |
+
}
|
| 245 |
+
|
| 246 |
+
// ─── Factory ───
|
| 247 |
+
|
| 248 |
+
export function createMCPClient(config: MCPConfig): MCPClient {
|
| 249 |
+
const serverConfig: MCP_SERVER_CONFIG = {
|
| 250 |
+
name: config.name,
|
| 251 |
+
transport: config.transport ?? 'stdio',
|
| 252 |
+
command: config.command,
|
| 253 |
+
args: config.args,
|
| 254 |
+
env: config.env,
|
| 255 |
+
url: config.url,
|
| 256 |
+
}
|
| 257 |
+
|
| 258 |
+
return new MCPClient(serverConfig)
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
export default {
|
| 262 |
+
MCPClient,
|
| 263 |
+
MCPConnectionManager,
|
| 264 |
+
createMCPClient,
|
| 265 |
+
}
|
src/mcp/index.ts
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Stack 2.9 MCP Module
|
| 2 |
+
//
|
| 3 |
+
// Model Context Protocol client for tool extensibility.
|
| 4 |
+
|
| 5 |
+
export {
|
| 6 |
+
MCPClient,
|
| 7 |
+
MCPConnectionManager,
|
| 8 |
+
createMCPClient,
|
| 9 |
+
} from './MCPClient.ts'
|
| 10 |
+
|
| 11 |
+
export type {
|
| 12 |
+
MCPTransportType,
|
| 13 |
+
MCPConfig,
|
| 14 |
+
MCP_SERVER_CONFIG,
|
| 15 |
+
MCPTool,
|
| 16 |
+
MCPResource,
|
| 17 |
+
} from './MCPClient.ts'
|
| 18 |
+
|
| 19 |
+
export default {
|
| 20 |
+
MCPConnectionManager,
|
| 21 |
+
}
|
stack-2.9-cli.py → src/stack-2.9-cli.py
RENAMED
|
File without changes
|
stack.py → src/stack.py
RENAMED
|
File without changes
|
src/utils/logger.ts
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Simple logger utility for Stack 2.9
|
| 2 |
+
|
| 3 |
+
type LogLevel = 'debug' | 'info' | 'warn' | 'error'
|
| 4 |
+
|
| 5 |
+
const LOG_LEVELS: Record<LogLevel, number> = {
|
| 6 |
+
debug: 0,
|
| 7 |
+
info: 1,
|
| 8 |
+
warn: 2,
|
| 9 |
+
error: 3,
|
| 10 |
+
}
|
| 11 |
+
|
| 12 |
+
let currentLevel: LogLevel = 'info'
|
| 13 |
+
|
| 14 |
+
export function setLogLevel(level: LogLevel): void {
|
| 15 |
+
currentLevel = level
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
function shouldLog(level: LogLevel): boolean {
|
| 19 |
+
return LOG_LEVELS[level] >= LOG_LEVELS[currentLevel]
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
function formatMessage(level: LogLevel, message: string, data?: unknown): string {
|
| 23 |
+
const timestamp = new Date().toISOString()
|
| 24 |
+
const dataStr = data ? ` ${JSON.stringify(data)}` : ''
|
| 25 |
+
return `[${timestamp}] [${level.toUpperCase()}] ${message}${dataStr}`
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
export function debug(message: string, data?: unknown): void {
|
| 29 |
+
if (shouldLog('debug')) {
|
| 30 |
+
console.log(formatMessage('debug', message, data))
|
| 31 |
+
}
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
export function log(message: string, data?: unknown): void {
|
| 35 |
+
if (shouldLog('info')) {
|
| 36 |
+
console.log(formatMessage('info', message, data))
|
| 37 |
+
}
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
export function warn(message: string, data?: unknown): void {
|
| 41 |
+
if (shouldLog('warn')) {
|
| 42 |
+
console.warn(formatMessage('warn', message, data))
|
| 43 |
+
}
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
export function error(message: string, data?: unknown): void {
|
| 47 |
+
if (shouldLog('error')) {
|
| 48 |
+
console.error(formatMessage('error', message, data))
|
| 49 |
+
}
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
export default { debug, log, warn, error, setLogLevel }
|
src/voice/VoiceApiClient.ts
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Voice API Client - Connects to Python voice server (Coqui TTS)
|
| 2 |
+
//
|
| 3 |
+
// This client provides TypeScript bindings to the Python FastAPI voice service
|
| 4 |
+
// for voice cloning and text-to-speech synthesis.
|
| 5 |
+
|
| 6 |
+
export interface VoiceConfig {
|
| 7 |
+
apiUrl: string
|
| 8 |
+
timeout?: number
|
| 9 |
+
}
|
| 10 |
+
|
| 11 |
+
export interface VoiceModel {
|
| 12 |
+
name: string
|
| 13 |
+
description?: string
|
| 14 |
+
}
|
| 15 |
+
|
| 16 |
+
export interface CloneVoiceRequest {
|
| 17 |
+
voiceName: string
|
| 18 |
+
audioPath?: string
|
| 19 |
+
audioData?: string // base64 encoded audio
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
export interface SynthesizeRequest {
|
| 23 |
+
text: string
|
| 24 |
+
voiceName: string
|
| 25 |
+
language?: string
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
export interface VoiceListResponse {
|
| 29 |
+
voices: VoiceModel[]
|
| 30 |
+
count: number
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
export interface CloneVoiceResponse {
|
| 34 |
+
success: boolean
|
| 35 |
+
voiceName: string
|
| 36 |
+
message: string
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
export class VoiceApiClient {
|
| 40 |
+
private apiUrl: string
|
| 41 |
+
private timeout: number
|
| 42 |
+
|
| 43 |
+
constructor(config: VoiceConfig) {
|
| 44 |
+
this.apiUrl = config.apiUrl.replace(/\/$/, '')
|
| 45 |
+
this.timeout = config.timeout ?? 30000
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
/**
|
| 49 |
+
* List all available voice models
|
| 50 |
+
*/
|
| 51 |
+
async listVoices(): Promise<VoiceListResponse> {
|
| 52 |
+
const response = await fetch(`${this.apiUrl}/voices`, {
|
| 53 |
+
method: 'GET',
|
| 54 |
+
headers: { 'Content-Type': 'application/json' },
|
| 55 |
+
signal: AbortSignal.timeout(this.timeout),
|
| 56 |
+
})
|
| 57 |
+
|
| 58 |
+
if (!response.ok) {
|
| 59 |
+
throw new Error(`Failed to list voices: ${response.status} ${response.statusText}`)
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
return response.json() as Promise<VoiceListResponse>
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
/**
|
| 66 |
+
* Clone a voice from audio sample(s)
|
| 67 |
+
*/
|
| 68 |
+
async cloneVoice(request: CloneVoiceRequest): Promise<CloneVoiceResponse> {
|
| 69 |
+
const response = await fetch(`${this.apiUrl}/clone`, {
|
| 70 |
+
method: 'POST',
|
| 71 |
+
headers: { 'Content-Type': 'application/json' },
|
| 72 |
+
body: JSON.stringify(request),
|
| 73 |
+
signal: AbortSignal.timeout(this.timeout),
|
| 74 |
+
})
|
| 75 |
+
|
| 76 |
+
if (!response.ok) {
|
| 77 |
+
throw new Error(`Failed to clone voice: ${response.status} ${response.statusText}`)
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
return response.json() as Promise<CloneVoiceResponse>
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
/**
|
| 84 |
+
* Synthesize speech with a cloned voice
|
| 85 |
+
* Returns audio data as a Blob
|
| 86 |
+
*/
|
| 87 |
+
async synthesize(request: SynthesizeRequest): Promise<Blob> {
|
| 88 |
+
const response = await fetch(`${this.apiUrl}/synthesize`, {
|
| 89 |
+
method: 'POST',
|
| 90 |
+
headers: { 'Content-Type': 'application/json' },
|
| 91 |
+
body: JSON.stringify(request),
|
| 92 |
+
signal: AbortSignal.timeout(this.timeout),
|
| 93 |
+
})
|
| 94 |
+
|
| 95 |
+
if (!response.ok) {
|
| 96 |
+
throw new Error(`Failed to synthesize: ${response.status} ${response.statusText}`)
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
return response.blob()
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
/**
|
| 103 |
+
* Stream speech synthesis for real-time applications
|
| 104 |
+
*/
|
| 105 |
+
async *streamSynthesize(request: SynthesizeRequest): AsyncGenerator<Uint8Array> {
|
| 106 |
+
const response = await fetch(`${this.apiUrl}/synthesize_stream`, {
|
| 107 |
+
method: 'POST',
|
| 108 |
+
headers: { 'Content-Type': 'application/json' },
|
| 109 |
+
body: JSON.stringify(request),
|
| 110 |
+
signal: AbortSignal.timeout(this.timeout),
|
| 111 |
+
})
|
| 112 |
+
|
| 113 |
+
if (!response.ok) {
|
| 114 |
+
throw new Error(`Failed to stream synthesize: ${response.status} ${response.statusText}`)
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
if (!response.body) {
|
| 118 |
+
throw new Error('Empty response body')
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
const reader = response.body.getReader()
|
| 122 |
+
const decoder = new TextDecoder()
|
| 123 |
+
|
| 124 |
+
try {
|
| 125 |
+
while (true) {
|
| 126 |
+
const { done, value } = await reader.read()
|
| 127 |
+
if (done) break
|
| 128 |
+
yield value
|
| 129 |
+
}
|
| 130 |
+
} finally {
|
| 131 |
+
reader.releaseLock()
|
| 132 |
+
}
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
/**
|
| 136 |
+
* Check if voice server is available
|
| 137 |
+
*/
|
| 138 |
+
async healthCheck(): Promise<boolean> {
|
| 139 |
+
try {
|
| 140 |
+
const response = await fetch(`${this.apiUrl}/health`, {
|
| 141 |
+
method: 'GET',
|
| 142 |
+
signal: AbortSignal.timeout(5000),
|
| 143 |
+
})
|
| 144 |
+
return response.ok
|
| 145 |
+
} catch {
|
| 146 |
+
return false
|
| 147 |
+
}
|
| 148 |
+
}
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
// Default client instance
|
| 152 |
+
let defaultClient: VoiceApiClient | null = null
|
| 153 |
+
|
| 154 |
+
/**
|
| 155 |
+
* Initialize the default voice client
|
| 156 |
+
*/
|
| 157 |
+
export function initVoiceClient(config: VoiceConfig): VoiceApiClient {
|
| 158 |
+
defaultClient = new VoiceApiClient(config)
|
| 159 |
+
return defaultClient
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
/**
|
| 163 |
+
* Get the default voice client
|
| 164 |
+
*/
|
| 165 |
+
export function getVoiceClient(): VoiceApiClient | null {
|
| 166 |
+
return defaultClient
|
| 167 |
+
}
|
src/voice/VoiceRecording.ts
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Voice Recording Service - Audio capture for voice input
|
| 2 |
+
//
|
| 3 |
+
// Handles microphone recording for voice commands using native audio
|
| 4 |
+
// or fallback to SoX/arecord on Linux.
|
| 5 |
+
|
| 6 |
+
import { spawn, type ChildProcess } from 'child_process'
|
| 7 |
+
import { readFile } from 'fs/promises'
|
| 8 |
+
import { log } from '../utils/logger.js'
|
| 9 |
+
|
| 10 |
+
// Recording configuration
|
| 11 |
+
export const RECORDING_SAMPLE_RATE = 16000
|
| 12 |
+
export const RECORDING_CHANNELS = 1
|
| 13 |
+
const SILENCE_DURATION_SECS = '2.0'
|
| 14 |
+
const SILENCE_THRESHOLD = '3%'
|
| 15 |
+
|
| 16 |
+
export type RecordingAvailability = {
|
| 17 |
+
available: boolean
|
| 18 |
+
reason: string | null
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
export type RecordingOptions = {
|
| 22 |
+
silenceDetection?: boolean
|
| 23 |
+
sampleRate?: number
|
| 24 |
+
channels?: number
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
/**
|
| 28 |
+
* Check if recording dependencies are available
|
| 29 |
+
*/
|
| 30 |
+
export async function checkRecordingDependencies(): Promise<{
|
| 31 |
+
available: boolean
|
| 32 |
+
missing: string[]
|
| 33 |
+
}> {
|
| 34 |
+
const missing: string[] = []
|
| 35 |
+
|
| 36 |
+
// Check for SoX (rec command)
|
| 37 |
+
try {
|
| 38 |
+
const result = spawn('rec', ['--version'], { stdio: 'ignore' })
|
| 39 |
+
await new Promise<void>((resolve) => {
|
| 40 |
+
result.on('close', () => resolve())
|
| 41 |
+
result.on('error', () => resolve())
|
| 42 |
+
setTimeout(() => resolve(), 2000)
|
| 43 |
+
})
|
| 44 |
+
} catch {
|
| 45 |
+
missing.push('sox (rec command)')
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
return { available: missing.length === 0, missing }
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
/**
|
| 52 |
+
* Check if recording is available in current environment
|
| 53 |
+
*/
|
| 54 |
+
export async function checkRecordingAvailability(): Promise<RecordingAvailability> {
|
| 55 |
+
// Check for environment variables that indicate remote/no-audio
|
| 56 |
+
if (process.env.CLAUDE_CODE_REMOTE === 'true') {
|
| 57 |
+
return {
|
| 58 |
+
available: false,
|
| 59 |
+
reason: 'Voice mode requires microphone access in local environment',
|
| 60 |
+
}
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
// Check for SoX or native audio
|
| 64 |
+
const deps = await checkRecordingDependencies()
|
| 65 |
+
if (deps.available) {
|
| 66 |
+
return { available: true, reason: null }
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
return {
|
| 70 |
+
available: false,
|
| 71 |
+
reason: `Voice recording requires SoX. Install with: brew install sox (macOS) or sudo apt-get install sox (Linux)`,
|
| 72 |
+
}
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
// Active recorder process
|
| 76 |
+
let activeRecorder: ChildProcess | null = null
|
| 77 |
+
let recordingActive = false
|
| 78 |
+
|
| 79 |
+
/**
|
| 80 |
+
* Start audio recording
|
| 81 |
+
* @param onData Callback for audio chunks
|
| 82 |
+
* @param onEnd Callback when recording ends
|
| 83 |
+
* @param options Recording options
|
| 84 |
+
*/
|
| 85 |
+
export async function startRecording(
|
| 86 |
+
onData: (chunk: Buffer) => void,
|
| 87 |
+
onEnd: () => void,
|
| 88 |
+
options: RecordingOptions = {},
|
| 89 |
+
): Promise<boolean> {
|
| 90 |
+
const sampleRate = options.sampleRate ?? RECORDING_SAMPLE_RATE
|
| 91 |
+
const channels = options.channels ?? RECORDING_CHANNELS
|
| 92 |
+
const useSilenceDetection = options.silenceDetection ?? true
|
| 93 |
+
|
| 94 |
+
log('[voice] Starting recording', { sampleRate, channels, useSilenceDetection })
|
| 95 |
+
|
| 96 |
+
// Build SoX command arguments
|
| 97 |
+
const args = [
|
| 98 |
+
'-q', // quiet
|
| 99 |
+
'--buffer',
|
| 100 |
+
'1024',
|
| 101 |
+
'-t',
|
| 102 |
+
'raw',
|
| 103 |
+
'-r',
|
| 104 |
+
String(sampleRate),
|
| 105 |
+
'-e',
|
| 106 |
+
'signed',
|
| 107 |
+
'-b',
|
| 108 |
+
'16',
|
| 109 |
+
'-c',
|
| 110 |
+
String(channels),
|
| 111 |
+
'-', // stdout
|
| 112 |
+
]
|
| 113 |
+
|
| 114 |
+
// Add silence detection if enabled
|
| 115 |
+
if (useSilenceDetection) {
|
| 116 |
+
args.push(
|
| 117 |
+
'silence',
|
| 118 |
+
'1',
|
| 119 |
+
'0.1',
|
| 120 |
+
SILENCE_THRESHOLD,
|
| 121 |
+
'1',
|
| 122 |
+
SILENCE_DURATION_SECS,
|
| 123 |
+
SILENCE_THRESHOLD,
|
| 124 |
+
)
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
const child = spawn('rec', args, {
|
| 128 |
+
stdio: ['pipe', 'pipe', 'pipe'],
|
| 129 |
+
})
|
| 130 |
+
|
| 131 |
+
activeRecorder = child
|
| 132 |
+
recordingActive = true
|
| 133 |
+
|
| 134 |
+
child.stdout?.on('data', (chunk: Buffer) => {
|
| 135 |
+
onData(chunk)
|
| 136 |
+
})
|
| 137 |
+
|
| 138 |
+
child.stderr?.on('data', () => {}) // Consume stderr
|
| 139 |
+
|
| 140 |
+
child.on('close', () => {
|
| 141 |
+
activeRecorder = null
|
| 142 |
+
recordingActive = false
|
| 143 |
+
onEnd()
|
| 144 |
+
})
|
| 145 |
+
|
| 146 |
+
child.on('error', (err) => {
|
| 147 |
+
log('[voice] Recording error', err)
|
| 148 |
+
activeRecorder = null
|
| 149 |
+
recordingActive = false
|
| 150 |
+
onEnd()
|
| 151 |
+
})
|
| 152 |
+
|
| 153 |
+
return true
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
/**
|
| 157 |
+
* Stop the current recording
|
| 158 |
+
*/
|
| 159 |
+
export function stopRecording(): void {
|
| 160 |
+
if (recordingActive && activeRecorder) {
|
| 161 |
+
activeRecorder.kill('SIGTERM')
|
| 162 |
+
activeRecorder = null
|
| 163 |
+
recordingActive = false
|
| 164 |
+
}
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
/**
|
| 168 |
+
* Check if recording is currently active
|
| 169 |
+
*/
|
| 170 |
+
export function isRecording(): boolean {
|
| 171 |
+
return recordingActive
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
/**
|
| 175 |
+
* Convert audio buffer to base64 for API transfer
|
| 176 |
+
*/
|
| 177 |
+
export function audioToBase64(buffer: Buffer): string {
|
| 178 |
+
return buffer.toString('base64')
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
/**
|
| 182 |
+
* Convert base64 to audio buffer
|
| 183 |
+
*/
|
| 184 |
+
export function base64ToAudio(base64: string): Buffer {
|
| 185 |
+
return Buffer.from(base64, 'base64')
|
| 186 |
+
}
|
src/voice/VoiceTools.ts
ADDED
|
@@ -0,0 +1,282 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Voice Tools - Tools for voice input/output in the AI assistant
|
| 2 |
+
//
|
| 3 |
+
// Provides tools for:
|
| 4 |
+
// - VoiceRecordingTool: Record voice commands
|
| 5 |
+
// - VoiceSynthesisTool: Speak responses
|
| 6 |
+
// - VoiceCloneTool: Clone voices from samples
|
| 7 |
+
|
| 8 |
+
import { log } from '../utils/logger'
|
| 9 |
+
import { initVoiceClient, getVoiceClient } from './VoiceApiClient'
|
| 10 |
+
import {
|
| 11 |
+
startRecording,
|
| 12 |
+
stopRecording,
|
| 13 |
+
isRecording,
|
| 14 |
+
checkRecordingAvailability,
|
| 15 |
+
audioToBase64,
|
| 16 |
+
type RecordingAvailability
|
| 17 |
+
} from './VoiceRecording'
|
| 18 |
+
|
| 19 |
+
// Tool result types
|
| 20 |
+
export interface ToolResult {
|
| 21 |
+
success: boolean
|
| 22 |
+
data?: unknown
|
| 23 |
+
error?: string
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
// Voice config type
|
| 27 |
+
export interface VoiceConfig {
|
| 28 |
+
apiUrl: string
|
| 29 |
+
timeout?: number
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
// ─── Voice Recording Tool ───
|
| 33 |
+
|
| 34 |
+
/**
|
| 35 |
+
* VoiceRecordingTool - Records voice input from microphone
|
| 36 |
+
*/
|
| 37 |
+
export class VoiceRecordingTool {
|
| 38 |
+
name = 'VoiceRecordingTool'
|
| 39 |
+
description = 'Record voice input from the microphone for voice commands'
|
| 40 |
+
|
| 41 |
+
async execute(options?: { maxDuration?: number }): Promise<ToolResult> {
|
| 42 |
+
try {
|
| 43 |
+
// Check availability
|
| 44 |
+
const availability = await checkRecordingAvailability()
|
| 45 |
+
if (!availability.available) {
|
| 46 |
+
return { success: false, error: availability.reason ?? 'Recording not available' }
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
// Start recording
|
| 50 |
+
let audioChunks: Buffer[] = []
|
| 51 |
+
|
| 52 |
+
const started = await startRecording(
|
| 53 |
+
(chunk) => {
|
| 54 |
+
audioChunks.push(chunk)
|
| 55 |
+
},
|
| 56 |
+
() => {
|
| 57 |
+
log('[voice] Recording ended')
|
| 58 |
+
},
|
| 59 |
+
{ silenceDetection: true }
|
| 60 |
+
)
|
| 61 |
+
|
| 62 |
+
if (!started) {
|
| 63 |
+
return { success: false, error: 'Failed to start recording' }
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
// Wait for recording to end (silence detection)
|
| 67 |
+
await new Promise<void>((resolve) => {
|
| 68 |
+
const checkInterval = setInterval(() => {
|
| 69 |
+
if (!isRecording()) {
|
| 70 |
+
clearInterval(checkInterval)
|
| 71 |
+
resolve()
|
| 72 |
+
}
|
| 73 |
+
}, 100)
|
| 74 |
+
|
| 75 |
+
// Timeout after maxDuration
|
| 76 |
+
if (options?.maxDuration) {
|
| 77 |
+
setTimeout(() => {
|
| 78 |
+
clearInterval(checkInterval)
|
| 79 |
+
stopRecording()
|
| 80 |
+
resolve()
|
| 81 |
+
}, options.maxDuration)
|
| 82 |
+
}
|
| 83 |
+
})
|
| 84 |
+
|
| 85 |
+
// Combine audio chunks
|
| 86 |
+
const audioBuffer = Buffer.concat(audioChunks)
|
| 87 |
+
const base64Audio = audioToBase64(audioBuffer)
|
| 88 |
+
|
| 89 |
+
return {
|
| 90 |
+
success: true,
|
| 91 |
+
data: {
|
| 92 |
+
audio: base64Audio,
|
| 93 |
+
duration: audioBuffer.length / (16000 * 2),
|
| 94 |
+
sampleRate: 16000,
|
| 95 |
+
channels: 1,
|
| 96 |
+
},
|
| 97 |
+
}
|
| 98 |
+
} catch (error) {
|
| 99 |
+
log('[voice] Recording error', error)
|
| 100 |
+
return { success: false, error: String(error) }
|
| 101 |
+
}
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
stop(): void {
|
| 105 |
+
stopRecording()
|
| 106 |
+
}
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
// ─── Voice Synthesis Tool ───
|
| 110 |
+
|
| 111 |
+
/**
|
| 112 |
+
* VoiceSynthesisTool - Convert text to speech using cloned voice
|
| 113 |
+
*/
|
| 114 |
+
export class VoiceSynthesisTool {
|
| 115 |
+
private client: ReturnType<typeof getVoiceClient>
|
| 116 |
+
|
| 117 |
+
constructor(config?: VoiceConfig) {
|
| 118 |
+
if (config) {
|
| 119 |
+
this.client = initVoiceClient(config)
|
| 120 |
+
} else {
|
| 121 |
+
this.client = getVoiceClient()
|
| 122 |
+
}
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
name = 'VoiceSynthesisTool'
|
| 126 |
+
description = 'Convert text to speech using a cloned voice'
|
| 127 |
+
|
| 128 |
+
async execute(request: { text: string; voiceName?: string }): Promise<ToolResult> {
|
| 129 |
+
const client = this.client
|
| 130 |
+
if (!client) {
|
| 131 |
+
return {
|
| 132 |
+
success: false,
|
| 133 |
+
error: 'Voice client not initialized. Call initVoiceClient() first.',
|
| 134 |
+
}
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
try {
|
| 138 |
+
const audioBlob = await client.synthesize({
|
| 139 |
+
text: request.text,
|
| 140 |
+
voiceName: request.voiceName ?? 'default',
|
| 141 |
+
})
|
| 142 |
+
|
| 143 |
+
// Convert blob to base64
|
| 144 |
+
const arrayBuffer = await audioBlob.arrayBuffer()
|
| 145 |
+
const base64Audio = btoa(
|
| 146 |
+
new Uint8Array(arrayBuffer).reduce((data, byte) => data + String.fromCharCode(byte), '')
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
return {
|
| 150 |
+
success: true,
|
| 151 |
+
data: {
|
| 152 |
+
audio: base64Audio,
|
| 153 |
+
format: 'wav',
|
| 154 |
+
text: request.text,
|
| 155 |
+
},
|
| 156 |
+
}
|
| 157 |
+
} catch (error) {
|
| 158 |
+
log('[voice] Synthesis error', error)
|
| 159 |
+
return { success: false, error: String(error) }
|
| 160 |
+
}
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
async *streamExecute(request: { text: string; voiceName?: string }): AsyncGenerator<Uint8Array> {
|
| 164 |
+
const client = this.client
|
| 165 |
+
if (!client) {
|
| 166 |
+
throw new Error('Voice client not initialized')
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
yield* client.streamSynthesize({
|
| 170 |
+
text: request.text,
|
| 171 |
+
voiceName: request.voiceName ?? 'default',
|
| 172 |
+
})
|
| 173 |
+
}
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
// ─── Voice Clone Tool ───
|
| 177 |
+
|
| 178 |
+
/**
|
| 179 |
+
* VoiceCloneTool - Clone a voice from audio samples
|
| 180 |
+
*/
|
| 181 |
+
export class VoiceCloneTool {
|
| 182 |
+
private client: ReturnType<typeof getVoiceClient>
|
| 183 |
+
|
| 184 |
+
constructor(config?: VoiceConfig) {
|
| 185 |
+
if (config) {
|
| 186 |
+
this.client = initVoiceClient(config)
|
| 187 |
+
} else {
|
| 188 |
+
this.client = getVoiceClient()
|
| 189 |
+
}
|
| 190 |
+
}
|
| 191 |
+
|
| 192 |
+
name = 'VoiceCloneTool'
|
| 193 |
+
description = 'Clone a voice from audio samples for use in synthesis'
|
| 194 |
+
|
| 195 |
+
async execute(request: { voiceName: string; audioPath?: string; audioData?: string }): Promise<ToolResult> {
|
| 196 |
+
const client = this.client
|
| 197 |
+
if (!client) {
|
| 198 |
+
return {
|
| 199 |
+
success: false,
|
| 200 |
+
error: 'Voice client not initialized. Call initVoiceClient() first.',
|
| 201 |
+
}
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
try {
|
| 205 |
+
const result = await client.cloneVoice({
|
| 206 |
+
voiceName: request.voiceName,
|
| 207 |
+
audioPath: request.audioPath,
|
| 208 |
+
audioData: request.audioData,
|
| 209 |
+
})
|
| 210 |
+
|
| 211 |
+
return {
|
| 212 |
+
success: result.success,
|
| 213 |
+
data: result,
|
| 214 |
+
}
|
| 215 |
+
} catch (error) {
|
| 216 |
+
log('[voice] Clone error', error)
|
| 217 |
+
return { success: false, error: String(error) }
|
| 218 |
+
}
|
| 219 |
+
}
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
// ─── Voice Status Tool ───
|
| 223 |
+
|
| 224 |
+
/**
|
| 225 |
+
* VoiceStatusTool - Check voice service availability
|
| 226 |
+
*/
|
| 227 |
+
export class VoiceStatusTool {
|
| 228 |
+
private client: ReturnType<typeof getVoiceClient>
|
| 229 |
+
|
| 230 |
+
constructor(config?: VoiceConfig) {
|
| 231 |
+
if (config) {
|
| 232 |
+
this.client = initVoiceClient(config)
|
| 233 |
+
} else {
|
| 234 |
+
this.client = getVoiceClient()
|
| 235 |
+
}
|
| 236 |
+
}
|
| 237 |
+
|
| 238 |
+
name = 'VoiceStatusTool'
|
| 239 |
+
description = 'Check voice service status and list available voices'
|
| 240 |
+
|
| 241 |
+
async execute(): Promise<ToolResult> {
|
| 242 |
+
try {
|
| 243 |
+
// Check recording availability
|
| 244 |
+
const recordingAvail = await checkRecordingAvailability()
|
| 245 |
+
|
| 246 |
+
// Check voice API availability
|
| 247 |
+
let apiAvailable = false
|
| 248 |
+
let voices: string[] = []
|
| 249 |
+
|
| 250 |
+
const client = this.client
|
| 251 |
+
if (client) {
|
| 252 |
+
apiAvailable = await client.healthCheck()
|
| 253 |
+
if (apiAvailable) {
|
| 254 |
+
const voiceList = await client.listVoices()
|
| 255 |
+
voices = voiceList.voices.map((v: { name: string }) => v.name)
|
| 256 |
+
}
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
return {
|
| 260 |
+
success: true,
|
| 261 |
+
data: {
|
| 262 |
+
recording: recordingAvail,
|
| 263 |
+
api: apiAvailable,
|
| 264 |
+
voices,
|
| 265 |
+
},
|
| 266 |
+
}
|
| 267 |
+
} catch (error) {
|
| 268 |
+
return { success: false, error: String(error) }
|
| 269 |
+
}
|
| 270 |
+
}
|
| 271 |
+
}
|
| 272 |
+
|
| 273 |
+
// ─── Tool Registry ───
|
| 274 |
+
|
| 275 |
+
export const voiceTools = {
|
| 276 |
+
VoiceRecordingTool,
|
| 277 |
+
VoiceSynthesisTool,
|
| 278 |
+
VoiceCloneTool,
|
| 279 |
+
VoiceStatusTool,
|
| 280 |
+
}
|
| 281 |
+
|
| 282 |
+
export default voiceTools
|
src/voice/index.ts
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Stack 2.9 Voice Module
|
| 2 |
+
//
|
| 3 |
+
// Voice integration for the Stack 2.9 AI coding assistant.
|
| 4 |
+
// Provides voice input/output capabilities through Python FastAPI backend.
|
| 5 |
+
|
| 6 |
+
import { VoiceApiClient, initVoiceClient, getVoiceClient } from './VoiceApiClient'
|
| 7 |
+
import {
|
| 8 |
+
startRecording,
|
| 9 |
+
stopRecording,
|
| 10 |
+
isRecording,
|
| 11 |
+
checkRecordingAvailability,
|
| 12 |
+
checkRecordingDependencies,
|
| 13 |
+
audioToBase64,
|
| 14 |
+
base64ToAudio,
|
| 15 |
+
RECORDING_SAMPLE_RATE,
|
| 16 |
+
RECORDING_CHANNELS,
|
| 17 |
+
type RecordingAvailability,
|
| 18 |
+
type RecordingOptions,
|
| 19 |
+
} from './VoiceRecording'
|
| 20 |
+
|
| 21 |
+
import {
|
| 22 |
+
VoiceRecordingTool,
|
| 23 |
+
VoiceSynthesisTool,
|
| 24 |
+
VoiceCloneTool,
|
| 25 |
+
VoiceStatusTool,
|
| 26 |
+
voiceTools,
|
| 27 |
+
type ToolResult,
|
| 28 |
+
} from './VoiceTools'
|
| 29 |
+
|
| 30 |
+
export {
|
| 31 |
+
VoiceApiClient,
|
| 32 |
+
initVoiceClient,
|
| 33 |
+
getVoiceClient,
|
| 34 |
+
startRecording,
|
| 35 |
+
stopRecording,
|
| 36 |
+
isRecording,
|
| 37 |
+
checkRecordingAvailability,
|
| 38 |
+
checkRecordingDependencies,
|
| 39 |
+
audioToBase64,
|
| 40 |
+
base64ToAudio,
|
| 41 |
+
RECORDING_SAMPLE_RATE,
|
| 42 |
+
RECORDING_CHANNELS,
|
| 43 |
+
VoiceRecordingTool,
|
| 44 |
+
VoiceSynthesisTool,
|
| 45 |
+
VoiceCloneTool,
|
| 46 |
+
VoiceStatusTool,
|
| 47 |
+
voiceTools,
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
export type {
|
| 51 |
+
RecordingAvailability,
|
| 52 |
+
RecordingOptions,
|
| 53 |
+
ToolResult,
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
// Type exports from VoiceApiClient
|
| 57 |
+
export interface VoiceConfig {
|
| 58 |
+
apiUrl: string
|
| 59 |
+
timeout?: number
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
export interface VoiceModel {
|
| 63 |
+
name: string
|
| 64 |
+
description?: string
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
export interface CloneVoiceRequest {
|
| 68 |
+
voiceName: string
|
| 69 |
+
audioPath?: string
|
| 70 |
+
audioData?: string
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
export interface SynthesizeRequest {
|
| 74 |
+
text: string
|
| 75 |
+
voiceName: string
|
| 76 |
+
language?: string
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
export interface VoiceListResponse {
|
| 80 |
+
voices: VoiceModel[]
|
| 81 |
+
count: number
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
export interface CloneVoiceResponse {
|
| 85 |
+
success: boolean
|
| 86 |
+
voiceName: string
|
| 87 |
+
message: string
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
// Convenience function to initialize voice with config from environment
|
| 91 |
+
export function initVoiceFromEnv(): VoiceApiClient | null {
|
| 92 |
+
const apiUrl = process.env.VOICE_API_URL
|
| 93 |
+
if (!apiUrl) {
|
| 94 |
+
console.warn('[voice] VOICE_API_URL not set, voice features disabled')
|
| 95 |
+
return null
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
return initVoiceClient({
|
| 99 |
+
apiUrl,
|
| 100 |
+
timeout: parseInt(process.env.VOICE_TIMEOUT ?? '30000', 10),
|
| 101 |
+
})
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
export default {
|
| 105 |
+
initVoiceClient,
|
| 106 |
+
getVoiceClient,
|
| 107 |
+
initVoiceFromEnv,
|
| 108 |
+
voiceTools,
|
| 109 |
+
}
|
stack-2.9-training/prepare_dataset.py
CHANGED
|
@@ -1,63 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import json
|
| 2 |
import os
|
|
|
|
| 3 |
from pathlib import Path
|
| 4 |
-
from
|
|
|
|
|
|
|
|
|
|
| 5 |
from transformers import AutoTokenizer
|
| 6 |
-
import pandas as pd
|
| 7 |
|
| 8 |
-
# Load the synthetic examples
|
| 9 |
-
examples_file = Path("/Users/walidsobhi/.openclaw/workspace/training-data/synthetic/examples.jsonl")
|
| 10 |
|
| 11 |
-
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
-
#
|
| 15 |
-
|
| 16 |
-
|
| 17 |
|
| 18 |
-
#
|
| 19 |
-
if
|
| 20 |
-
|
| 21 |
|
| 22 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
|
| 24 |
-
#
|
| 25 |
-
if '
|
| 26 |
-
|
| 27 |
-
elif 'prompt' in df.columns and 'completion' in df.columns:
|
| 28 |
-
df['prompt'] = df.apply(lambda row: f"### Prompt:\n{row['prompt']}\n\n### Completion:\n{row['completion']}", axis=1)
|
| 29 |
-
else:
|
| 30 |
-
raise ValueError("Data format not recognized. Expected 'instruction' and 'response' or 'prompt' and 'completion' columns")
|
| 31 |
|
| 32 |
-
#
|
| 33 |
-
|
|
|
|
| 34 |
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
|
|
|
|
|
|
| 39 |
truncation=True,
|
| 40 |
-
max_length=
|
| 41 |
-
return_tensors=
|
| 42 |
-
)
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Stack 2.9 Dataset Preparation Script
|
| 4 |
+
Loads JSONL training data, applies Qwen chat template, tokenizes, and saves for training.
|
| 5 |
+
Supports multiple input files for combining datasets.
|
| 6 |
+
"""
|
| 7 |
import json
|
| 8 |
import os
|
| 9 |
+
import sys
|
| 10 |
from pathlib import Path
|
| 11 |
+
from typing import List, Optional, Dict, Any
|
| 12 |
+
import argparse
|
| 13 |
+
|
| 14 |
+
from datasets import Dataset, load_dataset, load_from_disk, DatasetDict
|
| 15 |
from transformers import AutoTokenizer
|
|
|
|
| 16 |
|
|
|
|
|
|
|
| 17 |
|
| 18 |
+
SUPPORTED_MODELS = [
|
| 19 |
+
"Qwen/Qwen2.5-Coder-32B",
|
| 20 |
+
"Qwen/Qwen2.5-Coder-14B",
|
| 21 |
+
"Qwen/Qwen2.5-Coder-7B",
|
| 22 |
+
"Qwen/Qwen2.5-Coder-1.5B",
|
| 23 |
+
]
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def load_jsonl(file_path: str) -> List[Dict[str, Any]]:
|
| 27 |
+
"""Load JSONL file and return list of dicts."""
|
| 28 |
+
data = []
|
| 29 |
+
with open(file_path, 'r', encoding='utf-8') as f:
|
| 30 |
+
for line_num, line in enumerate(f, 1):
|
| 31 |
+
line = line.strip()
|
| 32 |
+
if not line:
|
| 33 |
+
continue
|
| 34 |
+
try:
|
| 35 |
+
data.append(json.loads(line))
|
| 36 |
+
except json.JSONDecodeError as e:
|
| 37 |
+
print(f"Warning: Skipping line {line_num} in {file_path}: {e}")
|
| 38 |
+
return data
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def format_sample(item: Dict[str, Any]) -> str:
|
| 42 |
+
"""
|
| 43 |
+
Format a sample for causal LM training.
|
| 44 |
+
Supports multiple data formats.
|
| 45 |
+
"""
|
| 46 |
+
# Format 1: instruction + response
|
| 47 |
+
if 'instruction' in item and 'response' in item:
|
| 48 |
+
return f"### Instruction:\n{item['instruction']}\n\n### Response:\n{item['response']}"
|
| 49 |
|
| 50 |
+
# Format 2: prompt + completion
|
| 51 |
+
if 'prompt' in item and 'completion' in item:
|
| 52 |
+
return f"### Prompt:\n{item['prompt']}\n\n### Completion:\n{item['completion']}"
|
| 53 |
|
| 54 |
+
# Format 3: input + output
|
| 55 |
+
if 'input' in item and 'output' in item:
|
| 56 |
+
return f"### Input:\n{item['input']}\n\n### Output:\n{item['output']}"
|
| 57 |
|
| 58 |
+
# Format 4: messages (chat format)
|
| 59 |
+
if 'messages' in item:
|
| 60 |
+
# Convert messages to text format
|
| 61 |
+
messages = item['messages']
|
| 62 |
+
text = ""
|
| 63 |
+
for msg in messages:
|
| 64 |
+
role = msg.get('role', 'user')
|
| 65 |
+
content = msg.get('content', '')
|
| 66 |
+
if role == 'user':
|
| 67 |
+
text += f"### User:\n{content}\n\n"
|
| 68 |
+
elif role == 'assistant':
|
| 69 |
+
text += f"### Assistant:\n{content}\n\n"
|
| 70 |
+
elif role == 'system':
|
| 71 |
+
text += f"### System:\n{content}\n\n"
|
| 72 |
+
return text.strip()
|
| 73 |
|
| 74 |
+
# Format 5: text field only
|
| 75 |
+
if 'text' in item:
|
| 76 |
+
return item['text']
|
|
|
|
|
|
|
|
|
|
|
|
|
| 77 |
|
| 78 |
+
# Unknown format - return empty string
|
| 79 |
+
print(f"Warning: Unknown format for item: {list(item.keys())}")
|
| 80 |
+
return ""
|
| 81 |
|
| 82 |
+
|
| 83 |
+
def tokenize_function(examples, tokenizer, max_length: int):
|
| 84 |
+
"""Tokenize text examples."""
|
| 85 |
+
return tokenizer(
|
| 86 |
+
examples['text'],
|
| 87 |
+
padding='max_length',
|
| 88 |
truncation=True,
|
| 89 |
+
max_length=max_length,
|
| 90 |
+
return_tensors=None
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def prepare_dataset(
|
| 95 |
+
input_files: List[str],
|
| 96 |
+
output_dir: str,
|
| 97 |
+
model_name: str = "Qwen/Qwen2.5-Coder-32B",
|
| 98 |
+
max_length: int = 4096,
|
| 99 |
+
test_split: float = 0.1,
|
| 100 |
+
use_chat_template: bool = True,
|
| 101 |
+
val_file: Optional[str] = None,
|
| 102 |
+
) -> None:
|
| 103 |
+
"""
|
| 104 |
+
Prepare dataset for training.
|
| 105 |
+
|
| 106 |
+
Args:
|
| 107 |
+
input_files: List of JSONL files to combine for training
|
| 108 |
+
output_dir: Directory to save processed datasets
|
| 109 |
+
model_name: Model name for tokenizer
|
| 110 |
+
max_length: Maximum sequence length
|
| 111 |
+
test_split: Fraction for validation split
|
| 112 |
+
use_chat_template: Whether to apply chat template
|
| 113 |
+
val_file: Optional separate validation file
|
| 114 |
+
"""
|
| 115 |
+
print("=" * 60)
|
| 116 |
+
print("Stack 2.9 Dataset Preparation")
|
| 117 |
+
print("=" * 60)
|
| 118 |
+
|
| 119 |
+
# Validate model
|
| 120 |
+
if model_name not in SUPPORTED_MODELS:
|
| 121 |
+
print(f"Warning: Model {model_name} not in known models, attempting anyway")
|
| 122 |
+
|
| 123 |
+
print(f"\n📋 Configuration:")
|
| 124 |
+
print(f" Model: {model_name}")
|
| 125 |
+
print(f" Max length: {max_length}")
|
| 126 |
+
print(f" Test split: {test_split}")
|
| 127 |
+
|
| 128 |
+
# Load tokenizer
|
| 129 |
+
print(f"\n🔧 Loading tokenizer...")
|
| 130 |
+
tokenizer = AutoTokenizer.from_pretrained(
|
| 131 |
+
model_name,
|
| 132 |
+
trust_remote_code=True,
|
| 133 |
+
padding_side="right" # Required for causal LM
|
| 134 |
+
)
|
| 135 |
+
if tokenizer.pad_token is None:
|
| 136 |
+
tokenizer.pad_token = tokenizer.eos_token
|
| 137 |
+
print(f" Set pad_token to eos_token")
|
| 138 |
+
|
| 139 |
+
# Load and combine training data
|
| 140 |
+
all_train_data = []
|
| 141 |
+
for input_file in input_files:
|
| 142 |
+
input_path = Path(input_file)
|
| 143 |
+
if not input_path.exists():
|
| 144 |
+
print(f"Warning: File not found: {input_file}, skipping")
|
| 145 |
+
continue
|
| 146 |
+
|
| 147 |
+
print(f"\n📂 Loading: {input_file}")
|
| 148 |
+
data = load_jsonl(str(input_path))
|
| 149 |
+
print(f" Loaded {len(data)} examples")
|
| 150 |
+
all_train_data.extend(data)
|
| 151 |
+
|
| 152 |
+
if not all_train_data:
|
| 153 |
+
raise ValueError("No training data loaded!")
|
| 154 |
+
|
| 155 |
+
print(f"\n📊 Total training examples: {len(all_train_data)}")
|
| 156 |
+
|
| 157 |
+
# Format data
|
| 158 |
+
print(f"\n✏️ Formatting examples...")
|
| 159 |
+
formatted_data = []
|
| 160 |
+
for i, item in enumerate(all_train_data):
|
| 161 |
+
text = format_sample(item)
|
| 162 |
+
if text: # Only add non-empty
|
| 163 |
+
formatted_data.append({'text': text})
|
| 164 |
+
|
| 165 |
+
print(f" Formatted {len(formatted_data)} examples")
|
| 166 |
+
|
| 167 |
+
if not formatted_data:
|
| 168 |
+
raise ValueError("No valid training samples after formatting!")
|
| 169 |
+
|
| 170 |
+
# Create dataset
|
| 171 |
+
dataset = Dataset.from_list(formatted_data)
|
| 172 |
+
|
| 173 |
+
# Tokenize
|
| 174 |
+
print(f"\n🔢 Tokenizing...")
|
| 175 |
+
dataset = dataset.map(
|
| 176 |
+
lambda examples: tokenize_function(examples, tokenizer, max_length),
|
| 177 |
+
batched=True,
|
| 178 |
+
remove_columns=['text'],
|
| 179 |
+
desc="Tokenizing"
|
| 180 |
+
)
|
| 181 |
+
|
| 182 |
+
print(f" Tokenized dataset: {len(dataset)} examples")
|
| 183 |
+
|
| 184 |
+
# Split train/val
|
| 185 |
+
if val_file and Path(val_file).exists():
|
| 186 |
+
# Use separate validation file
|
| 187 |
+
print(f"\n📂 Loading separate validation file: {val_file}")
|
| 188 |
+
val_data = load_jsonl(val_file)
|
| 189 |
+
val_formatted = []
|
| 190 |
+
for item in val_data:
|
| 191 |
+
text = format_sample(item)
|
| 192 |
+
if text:
|
| 193 |
+
val_formatted.append({'text': text})
|
| 194 |
+
|
| 195 |
+
val_dataset = Dataset.from_list(val_formatted)
|
| 196 |
+
val_dataset = val_dataset.map(
|
| 197 |
+
lambda examples: tokenize_function(examples, tokenizer, max_length),
|
| 198 |
+
batched=True,
|
| 199 |
+
remove_columns=['text'],
|
| 200 |
+
desc="Tokenizing validation"
|
| 201 |
+
)
|
| 202 |
+
|
| 203 |
+
# Use all of dataset for training, val_dataset for eval
|
| 204 |
+
train_data = dataset
|
| 205 |
+
eval_data = val_dataset
|
| 206 |
+
else:
|
| 207 |
+
# Split from main dataset
|
| 208 |
+
print(f"\n✂️ Splitting dataset...")
|
| 209 |
+
split = dataset.train_test_split(test_size=test_split)
|
| 210 |
+
train_data = split['train']
|
| 211 |
+
eval_data = split['test']
|
| 212 |
+
|
| 213 |
+
print(f" Train: {len(train_data)} examples")
|
| 214 |
+
print(f" Eval: {len(eval_data)} examples")
|
| 215 |
+
|
| 216 |
+
# Save
|
| 217 |
+
output_path = Path(output_dir)
|
| 218 |
+
train_path = output_path / "train"
|
| 219 |
+
eval_path = output_path / "eval"
|
| 220 |
+
|
| 221 |
+
print(f"\n💾 Saving to: {output_dir}")
|
| 222 |
+
train_data.save_to_disk(str(train_path))
|
| 223 |
+
eval_data.save_to_disk(str(eval_path))
|
| 224 |
+
|
| 225 |
+
print(f" ✅ Done!")
|
| 226 |
+
print(f" Train saved to: {train_path}")
|
| 227 |
+
print(f" Eval saved to: {eval_path}")
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
def main():
|
| 231 |
+
parser = argparse.ArgumentParser(description="Stack 2.9 Dataset Preparation")
|
| 232 |
+
parser.add_argument(
|
| 233 |
+
"--config",
|
| 234 |
+
type=str,
|
| 235 |
+
default=None,
|
| 236 |
+
help="Path to YAML config file (optional)"
|
| 237 |
+
)
|
| 238 |
+
parser.add_argument(
|
| 239 |
+
"--input",
|
| 240 |
+
type=str,
|
| 241 |
+
nargs="+",
|
| 242 |
+
default=None,
|
| 243 |
+
help="Input JSONL files (space-separated)"
|
| 244 |
+
)
|
| 245 |
+
parser.add_argument(
|
| 246 |
+
"--output",
|
| 247 |
+
type=str,
|
| 248 |
+
default="/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/data",
|
| 249 |
+
help="Output directory for processed datasets"
|
| 250 |
+
)
|
| 251 |
+
parser.add_argument(
|
| 252 |
+
"--model",
|
| 253 |
+
type=str,
|
| 254 |
+
default="Qwen/Qwen2.5-Coder-32B",
|
| 255 |
+
help="Model name for tokenizer"
|
| 256 |
+
)
|
| 257 |
+
parser.add_argument(
|
| 258 |
+
"--max-length",
|
| 259 |
+
type=int,
|
| 260 |
+
default=4096,
|
| 261 |
+
help="Maximum sequence length"
|
| 262 |
+
)
|
| 263 |
+
parser.add_argument(
|
| 264 |
+
"--test-split",
|
| 265 |
+
type=float,
|
| 266 |
+
default=0.1,
|
| 267 |
+
help="Validation split ratio"
|
| 268 |
+
)
|
| 269 |
+
parser.add_argument(
|
| 270 |
+
"--val-file",
|
| 271 |
+
type=str,
|
| 272 |
+
default=None,
|
| 273 |
+
help="Separate validation file (optional)"
|
| 274 |
+
)
|
| 275 |
+
|
| 276 |
+
args = parser.parse_args()
|
| 277 |
+
|
| 278 |
+
# Determine input files
|
| 279 |
+
if args.input:
|
| 280 |
+
input_files = args.input
|
| 281 |
+
else:
|
| 282 |
+
# Default to final training data
|
| 283 |
+
input_files = [
|
| 284 |
+
"/Users/walidsobhi/.openclaw/workspace/stack-2.9/training-data/final/train.jsonl"
|
| 285 |
+
]
|
| 286 |
+
|
| 287 |
+
try:
|
| 288 |
+
prepare_dataset(
|
| 289 |
+
input_files=input_files,
|
| 290 |
+
output_dir=args.output,
|
| 291 |
+
model_name=args.model,
|
| 292 |
+
max_length=args.max_length,
|
| 293 |
+
test_split=args.test_split,
|
| 294 |
+
val_file=args.val_file
|
| 295 |
+
)
|
| 296 |
+
except Exception as e:
|
| 297 |
+
print(f"\n❌ Error: {e}")
|
| 298 |
+
import traceback
|
| 299 |
+
traceback.print_exc()
|
| 300 |
+
sys.exit(1)
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
if __name__ == "__main__":
|
| 304 |
+
main()
|
stack-2.9-training/run_training.py
ADDED
|
@@ -0,0 +1,348 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Stack 2.9 Training Pipeline
|
| 4 |
+
Complete end-to-end training pipeline: prepare data → train → merge → verify.
|
| 5 |
+
"""
|
| 6 |
+
import os
|
| 7 |
+
import sys
|
| 8 |
+
import subprocess
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from typing import Optional, List
|
| 11 |
+
import argparse
|
| 12 |
+
import yaml
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def print_header(text: str):
|
| 16 |
+
"""Print a formatted header."""
|
| 17 |
+
print("\n" + "=" * 60)
|
| 18 |
+
print(f" {text}")
|
| 19 |
+
print("=" * 60)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def run_command(cmd: List[str], cwd: Optional[str] = None, env: Optional[dict] = None) -> subprocess.CompletedProcess:
|
| 23 |
+
"""Run a command and return the result."""
|
| 24 |
+
print(f"\n$ {' '.join(cmd)}")
|
| 25 |
+
result = subprocess.run(
|
| 26 |
+
cmd,
|
| 27 |
+
cwd=cwd,
|
| 28 |
+
env=env,
|
| 29 |
+
capture_output=False # Show output in real-time
|
| 30 |
+
)
|
| 31 |
+
if result.returncode != 0:
|
| 32 |
+
print(f"❌ Command failed with exit code {result.returncode}")
|
| 33 |
+
sys.exit(result.returncode)
|
| 34 |
+
return result
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def check_dataset_exists(config: dict) -> bool:
|
| 38 |
+
"""Check if pre-processed dataset exists."""
|
| 39 |
+
data_config = config.get("data", {})
|
| 40 |
+
train_dir = data_config.get("train_dir")
|
| 41 |
+
eval_dir = data_config.get("eval_dir")
|
| 42 |
+
|
| 43 |
+
if train_dir and eval_dir:
|
| 44 |
+
train_path = Path(train_dir)
|
| 45 |
+
eval_path = Path(eval_dir)
|
| 46 |
+
# Check for Arrow files
|
| 47 |
+
if train_path.exists() and (train_path / "dataset_info.json").exists():
|
| 48 |
+
if eval_path.exists() and (eval_path / "dataset_info.json").exists():
|
| 49 |
+
return True
|
| 50 |
+
return False
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def check_model_available(model_name: str) -> bool:
|
| 54 |
+
"""Check if base model is available locally or can be downloaded."""
|
| 55 |
+
# Check if model is cached
|
| 56 |
+
model_cache = Path.home() / ".cache" / "huggingface" / "hub"
|
| 57 |
+
if model_cache.exists():
|
| 58 |
+
# Try to find model in cache
|
| 59 |
+
for cached in model_cache.glob(f"models--{model_name.replace('/', '--')}*"):
|
| 60 |
+
return True
|
| 61 |
+
|
| 62 |
+
print(f"Note: Model {model_name} will be downloaded if not cached")
|
| 63 |
+
return False
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def prepare_data(config: dict) -> None:
|
| 67 |
+
"""Prepare training data."""
|
| 68 |
+
print_header("Step 1: Preparing Dataset")
|
| 69 |
+
|
| 70 |
+
data_config = config.get("data", {})
|
| 71 |
+
model_config = config.get("model", {})
|
| 72 |
+
|
| 73 |
+
train_files = data_config.get("train_files", [])
|
| 74 |
+
val_file = data_config.get("val_file")
|
| 75 |
+
output_dir = data_config.get("train_dir", "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/data")
|
| 76 |
+
model_name = model_config.get("name", "Qwen/Qwen2.5-Coder-32B")
|
| 77 |
+
max_length = data_config.get("max_length", 4096)
|
| 78 |
+
|
| 79 |
+
# Build command
|
| 80 |
+
cmd = [
|
| 81 |
+
sys.executable,
|
| 82 |
+
"prepare_dataset.py",
|
| 83 |
+
"--output", output_dir,
|
| 84 |
+
"--model", model_name,
|
| 85 |
+
"--max-length", str(max_length),
|
| 86 |
+
]
|
| 87 |
+
|
| 88 |
+
for f in train_files:
|
| 89 |
+
cmd.extend(["--input", f])
|
| 90 |
+
|
| 91 |
+
if val_file:
|
| 92 |
+
cmd.extend(["--val-file", val_file])
|
| 93 |
+
|
| 94 |
+
# Run
|
| 95 |
+
workspace = Path(__file__).parent
|
| 96 |
+
run_command(cmd, cwd=str(workspace))
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def train_model(config: dict, resume_from: Optional[str] = None) -> None:
|
| 100 |
+
"""Run LoRA training."""
|
| 101 |
+
print_header("Step 2: Training LoRA")
|
| 102 |
+
|
| 103 |
+
workspace = Path(__file__).parent
|
| 104 |
+
|
| 105 |
+
# Build command
|
| 106 |
+
cmd = [
|
| 107 |
+
sys.executable,
|
| 108 |
+
"train_lora.py",
|
| 109 |
+
"--config", "train_config.yaml"
|
| 110 |
+
]
|
| 111 |
+
|
| 112 |
+
if resume_from:
|
| 113 |
+
cmd.extend(["--resume", resume_from])
|
| 114 |
+
|
| 115 |
+
run_command(cmd, cwd=str(workspace))
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def merge_model(config: dict) -> None:
|
| 119 |
+
"""Merge LoRA adapter with base model."""
|
| 120 |
+
print_header("Step 3: Merging LoRA Adapter")
|
| 121 |
+
|
| 122 |
+
output_config = config.get("output", {})
|
| 123 |
+
merge_config = config.get("merge", {})
|
| 124 |
+
|
| 125 |
+
lora_dir = output_config.get("lora_dir")
|
| 126 |
+
merged_dir = merge_config.get("output_dir", output_config.get("merged_dir"))
|
| 127 |
+
|
| 128 |
+
if not lora_dir:
|
| 129 |
+
print("❌ No LoRA directory specified in config")
|
| 130 |
+
return
|
| 131 |
+
|
| 132 |
+
if not merged_dir:
|
| 133 |
+
print("❌ No merged output directory specified in config")
|
| 134 |
+
return
|
| 135 |
+
|
| 136 |
+
# Check if LoRA checkpoint exists
|
| 137 |
+
lora_path = Path(lora_dir)
|
| 138 |
+
if not lora_path.exists():
|
| 139 |
+
print(f"❌ LoRA directory not found: {lora_dir}")
|
| 140 |
+
return
|
| 141 |
+
|
| 142 |
+
# Use merge_adapter.py if it exists
|
| 143 |
+
workspace = Path(__file__).parent
|
| 144 |
+
merge_script = workspace / "merge_adapter.py"
|
| 145 |
+
|
| 146 |
+
if merge_script.exists():
|
| 147 |
+
# Read and update merge script paths
|
| 148 |
+
with open(merge_script, 'r') as f:
|
| 149 |
+
content = f.read()
|
| 150 |
+
|
| 151 |
+
# Update paths in the script
|
| 152 |
+
content = content.replace(
|
| 153 |
+
'"/output/lora"',
|
| 154 |
+
f'"{lora_dir}"'
|
| 155 |
+
)
|
| 156 |
+
content = content.replace(
|
| 157 |
+
'"/output/merged"',
|
| 158 |
+
f'"{merged_dir}"'
|
| 159 |
+
)
|
| 160 |
+
|
| 161 |
+
# Write to temp file and run
|
| 162 |
+
import tempfile
|
| 163 |
+
with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as tmp:
|
| 164 |
+
tmp.write(content)
|
| 165 |
+
tmp_path = tmp.name
|
| 166 |
+
|
| 167 |
+
try:
|
| 168 |
+
run_command([sys.executable, tmp_path])
|
| 169 |
+
finally:
|
| 170 |
+
os.unlink(tmp_path)
|
| 171 |
+
else:
|
| 172 |
+
print(f"Note: No merge script found at {merge_script}")
|
| 173 |
+
print(f" To merge manually, run:")
|
| 174 |
+
print(f" python -c \"from peft import PeftModel; from transformers import AutoModelForCausalLM; "
|
| 175 |
+
f"m = AutoModelForCausalLM.from_pretrained('{config['model']['name']}'); "
|
| 176 |
+
f"p = PeftModel.from_pretrained(m, '{lora_dir}'); p.merge_and_unload().save_pretrained('{merged_dir}')\"")
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def verify_model(config: dict) -> None:
|
| 180 |
+
"""Verify the trained model works."""
|
| 181 |
+
print_header("Step 4: Verifying Model")
|
| 182 |
+
|
| 183 |
+
output_config = config.get("output", {})
|
| 184 |
+
model_config = config.get("model", {})
|
| 185 |
+
|
| 186 |
+
# Try merged model first, then LoRA
|
| 187 |
+
merged_dir = output_config.get("merged_dir")
|
| 188 |
+
lora_dir = output_config.get("lora_dir")
|
| 189 |
+
|
| 190 |
+
model_path = merged_dir or lora_dir
|
| 191 |
+
|
| 192 |
+
if not model_path:
|
| 193 |
+
print("❌ No model output directory found")
|
| 194 |
+
return
|
| 195 |
+
|
| 196 |
+
if not Path(model_path).exists():
|
| 197 |
+
print(f"❌ Model directory not found: {model_path}")
|
| 198 |
+
return
|
| 199 |
+
|
| 200 |
+
print(f"✅ Model saved to: {model_path}")
|
| 201 |
+
|
| 202 |
+
# Quick test - try loading the model
|
| 203 |
+
print("\n🔍 Testing model loading...")
|
| 204 |
+
test_code = f"""
|
| 205 |
+
import torch
|
| 206 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 207 |
+
|
| 208 |
+
model_path = "{model_path}"
|
| 209 |
+
model_name = "{model_config.get('name', 'Qwen/Qwen2.5-Coder-32B')}"
|
| 210 |
+
|
| 211 |
+
try:
|
| 212 |
+
# Try loading merged model
|
| 213 |
+
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
|
| 214 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 215 |
+
model_path,
|
| 216 |
+
torch_dtype=torch.float16,
|
| 217 |
+
device_map="auto",
|
| 218 |
+
trust_remote_code=True
|
| 219 |
+
)
|
| 220 |
+
print(f"✅ Model loaded successfully!")
|
| 221 |
+
print(f" Parameters: {{model.num_parameters():,}}")
|
| 222 |
+
except Exception as e:
|
| 223 |
+
print(f"⚠️ Could not load merged model: {{e}}")
|
| 224 |
+
print(" This is normal if using LoRA adapter - use with PeftModel to load")
|
| 225 |
+
"""
|
| 226 |
+
|
| 227 |
+
result = subprocess.run(
|
| 228 |
+
[sys.executable, "-c", test_code],
|
| 229 |
+
capture_output=True,
|
| 230 |
+
text=True
|
| 231 |
+
)
|
| 232 |
+
print(result.stdout)
|
| 233 |
+
if result.stderr:
|
| 234 |
+
print(result.stderr)
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def load_config(config_path: str) -> dict:
|
| 238 |
+
"""Load training configuration."""
|
| 239 |
+
with open(config_path, 'r') as f:
|
| 240 |
+
return yaml.safe_load(f)
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def main():
|
| 244 |
+
parser = argparse.ArgumentParser(description="Stack 2.9 Training Pipeline")
|
| 245 |
+
parser.add_argument(
|
| 246 |
+
"--config",
|
| 247 |
+
type=str,
|
| 248 |
+
default="train_config.yaml",
|
| 249 |
+
help="Path to training config file"
|
| 250 |
+
)
|
| 251 |
+
parser.add_argument(
|
| 252 |
+
"--skip-data-prep",
|
| 253 |
+
action="store_true",
|
| 254 |
+
help="Skip dataset preparation (use existing prepared data)"
|
| 255 |
+
)
|
| 256 |
+
parser.add_argument(
|
| 257 |
+
"--skip-merge",
|
| 258 |
+
action="store_true",
|
| 259 |
+
help="Skip LoRA merging step"
|
| 260 |
+
)
|
| 261 |
+
parser.add_argument(
|
| 262 |
+
"--skip-verify",
|
| 263 |
+
action="store_true",
|
| 264 |
+
help="Skip model verification"
|
| 265 |
+
)
|
| 266 |
+
parser.add_argument(
|
| 267 |
+
"--resume",
|
| 268 |
+
type=str,
|
| 269 |
+
default=None,
|
| 270 |
+
help="Resume training from checkpoint"
|
| 271 |
+
)
|
| 272 |
+
parser.add_argument(
|
| 273 |
+
"--steps",
|
| 274 |
+
nargs="+",
|
| 275 |
+
choices=["prepare", "train", "merge", "verify", "all"],
|
| 276 |
+
default=["all"],
|
| 277 |
+
help="Which steps to run"
|
| 278 |
+
)
|
| 279 |
+
|
| 280 |
+
args = parser.parse_args()
|
| 281 |
+
|
| 282 |
+
# Load config
|
| 283 |
+
workspace = Path(__file__).parent
|
| 284 |
+
config_path = workspace / args.config
|
| 285 |
+
|
| 286 |
+
if not config_path.exists():
|
| 287 |
+
print(f"❌ Config file not found: {config_path}")
|
| 288 |
+
sys.exit(1)
|
| 289 |
+
|
| 290 |
+
config = load_config(str(config_path))
|
| 291 |
+
|
| 292 |
+
print_header("Stack 2.9 Training Pipeline")
|
| 293 |
+
print(f"Config: {config_path}")
|
| 294 |
+
print(f"Model: {config.get('model', {}).get('name', 'Unknown')}")
|
| 295 |
+
|
| 296 |
+
# Determine steps to run
|
| 297 |
+
steps = args.steps
|
| 298 |
+
if "all" in steps:
|
| 299 |
+
steps = ["prepare", "train", "merge", "verify"]
|
| 300 |
+
|
| 301 |
+
# Run steps
|
| 302 |
+
try:
|
| 303 |
+
if "prepare" in steps:
|
| 304 |
+
if args.skip_data_prep:
|
| 305 |
+
print("\n⏭️ Skipping data preparation (--skip-data-prep)")
|
| 306 |
+
else:
|
| 307 |
+
if check_dataset_exists(config):
|
| 308 |
+
print("\n⏭️ Skipping data preparation (datasets already exist)")
|
| 309 |
+
response = input("Re-prepare? [y/N]: ")
|
| 310 |
+
if response.lower() != 'y':
|
| 311 |
+
pass
|
| 312 |
+
else:
|
| 313 |
+
prepare_data(config)
|
| 314 |
+
else:
|
| 315 |
+
prepare_data(config)
|
| 316 |
+
|
| 317 |
+
if "train" in steps:
|
| 318 |
+
train_model(config, args.resume)
|
| 319 |
+
|
| 320 |
+
if "merge" in steps:
|
| 321 |
+
if args.skip_merge:
|
| 322 |
+
print("\n⏭️ Skipping merge (--skip-merge)")
|
| 323 |
+
elif config.get("merge", {}).get("enabled", True):
|
| 324 |
+
merge_model(config)
|
| 325 |
+
else:
|
| 326 |
+
print("\n⏭️ Skipping merge (disabled in config)")
|
| 327 |
+
|
| 328 |
+
if "verify" in steps:
|
| 329 |
+
if args.skip_verify:
|
| 330 |
+
print("\n⏭️ Skipping verification (--skip-verify)")
|
| 331 |
+
else:
|
| 332 |
+
verify_model(config)
|
| 333 |
+
|
| 334 |
+
print_header("Pipeline Complete!")
|
| 335 |
+
print("🎉 Training pipeline finished successfully!")
|
| 336 |
+
|
| 337 |
+
except KeyboardInterrupt:
|
| 338 |
+
print("\n\n⚠️ Pipeline interrupted by user")
|
| 339 |
+
sys.exit(1)
|
| 340 |
+
except Exception as e:
|
| 341 |
+
print(f"\n❌ Pipeline error: {e}")
|
| 342 |
+
import traceback
|
| 343 |
+
traceback.print_exc()
|
| 344 |
+
sys.exit(1)
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
if __name__ == "__main__":
|
| 348 |
+
main()
|
stack-2.9-training/train_config.yaml
CHANGED
|
@@ -6,12 +6,15 @@ model:
|
|
| 6 |
trust_remote_code: true
|
| 7 |
torch_dtype: "bfloat16"
|
| 8 |
|
| 9 |
-
# Data Configuration
|
| 10 |
data:
|
| 11 |
-
|
|
|
|
|
|
|
|
|
|
| 12 |
train_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/data/train"
|
| 13 |
eval_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/data/eval"
|
| 14 |
-
max_length:
|
| 15 |
train_split: 0.9
|
| 16 |
test_split: 0.1
|
| 17 |
|
|
@@ -41,18 +44,19 @@ training:
|
|
| 41 |
weight_decay: 0.01
|
| 42 |
max_grad_norm: 1.0
|
| 43 |
logging_steps: 10
|
| 44 |
-
eval_steps:
|
| 45 |
-
save_steps:
|
| 46 |
save_total_limit: 3
|
| 47 |
-
fp16:
|
| 48 |
-
bf16:
|
| 49 |
gradient_checkpointing: true
|
|
|
|
| 50 |
|
| 51 |
# Output Configuration
|
| 52 |
output:
|
| 53 |
-
lora_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/output/stack-2.9-lora"
|
| 54 |
-
merged_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/output/stack-2.9-merged"
|
| 55 |
-
awq_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/output/stack-2.9-awq"
|
| 56 |
|
| 57 |
# Quantization Configuration
|
| 58 |
quantization:
|
|
@@ -62,13 +66,18 @@ quantization:
|
|
| 62 |
|
| 63 |
# Logging Configuration
|
| 64 |
logging:
|
| 65 |
-
report_to: "wandb"
|
| 66 |
wandb_project: "stack-2.9-training"
|
| 67 |
run_name: null
|
| 68 |
|
| 69 |
-
# Hardware Configuration
|
| 70 |
hardware:
|
| 71 |
-
device: "cuda"
|
| 72 |
num_gpus: 1
|
| 73 |
-
use_4bit: true
|
| 74 |
-
use_8bit: false
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
trust_remote_code: true
|
| 7 |
torch_dtype: "bfloat16"
|
| 8 |
|
| 9 |
+
# Data Configuration - supports multiple training files
|
| 10 |
data:
|
| 11 |
+
train_files:
|
| 12 |
+
- "/Users/walidsobhi/.openclaw/workspace/stack-2.9/training-data/final/train.jsonl"
|
| 13 |
+
val_file: "/Users/walidsobhi/.openclaw/workspace/stack-2.9/training-data/final/val.jsonl"
|
| 14 |
+
test_file: "/Users/walidsobhi/.openclaw/workspace/stack-2.9/training-data/final/test.jsonl"
|
| 15 |
train_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/data/train"
|
| 16 |
eval_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/data/eval"
|
| 17 |
+
max_length: 4096 # Reduced for practical training on MPS/consumer GPUs
|
| 18 |
train_split: 0.9
|
| 19 |
test_split: 0.1
|
| 20 |
|
|
|
|
| 44 |
weight_decay: 0.01
|
| 45 |
max_grad_norm: 1.0
|
| 46 |
logging_steps: 10
|
| 47 |
+
eval_steps: 500
|
| 48 |
+
save_steps: 1000
|
| 49 |
save_total_limit: 3
|
| 50 |
+
fp16: false
|
| 51 |
+
bf16: true
|
| 52 |
gradient_checkpointing: true
|
| 53 |
+
optim: "adamw_torch"
|
| 54 |
|
| 55 |
# Output Configuration
|
| 56 |
output:
|
| 57 |
+
lora_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/output/stack-2.9-32b-lora"
|
| 58 |
+
merged_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/output/stack-2.9-32b-merged"
|
| 59 |
+
awq_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/output/stack-2.9-32b-awq"
|
| 60 |
|
| 61 |
# Quantization Configuration
|
| 62 |
quantization:
|
|
|
|
| 66 |
|
| 67 |
# Logging Configuration
|
| 68 |
logging:
|
| 69 |
+
report_to: "none" # Set to "wandb" to enable Weights & Biases tracking
|
| 70 |
wandb_project: "stack-2.9-training"
|
| 71 |
run_name: null
|
| 72 |
|
| 73 |
+
# Hardware Configuration
|
| 74 |
hardware:
|
| 75 |
+
device: "cuda" # Change to "mps" for Mac, "cpu" for CPU
|
| 76 |
num_gpus: 1
|
| 77 |
+
use_4bit: true # Enable 4-bit for 32B model on limited VRAM
|
| 78 |
+
use_8bit: false
|
| 79 |
+
|
| 80 |
+
# Merge Configuration (for after training completes)
|
| 81 |
+
merge:
|
| 82 |
+
enabled: true
|
| 83 |
+
output_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/output/stack-2.9-32b-merged"
|
stack-2.9-training/train_lora.py
CHANGED
|
@@ -169,7 +169,7 @@ def create_training_arguments(config: Dict[str, Any]) -> TrainingArguments:
|
|
| 169 |
weight_decay=training_config["weight_decay"],
|
| 170 |
max_grad_norm=training_config["max_grad_norm"],
|
| 171 |
fp16=training_config.get("fp16", True),
|
| 172 |
-
bf16=False, #
|
| 173 |
gradient_checkpointing=training_config.get("gradient_checkpointing", True),
|
| 174 |
logging_steps=training_config["logging_steps"],
|
| 175 |
eval_strategy="steps",
|
|
@@ -237,13 +237,17 @@ def train_lora(
|
|
| 237 |
eval_dir = data_config["eval_dir"]
|
| 238 |
|
| 239 |
# Check if it's a local disk dataset (saved with save_to_disk)
|
| 240 |
-
|
|
|
|
| 241 |
from datasets import load_from_disk
|
| 242 |
train_dataset = load_from_disk(train_dir)
|
| 243 |
eval_dataset = load_from_disk(eval_dir)
|
|
|
|
| 244 |
else:
|
|
|
|
| 245 |
train_dataset = load_dataset(train_dir)
|
| 246 |
eval_dataset = load_dataset(eval_dir)
|
|
|
|
| 247 |
|
| 248 |
print(f" Train samples: {len(train_dataset)}")
|
| 249 |
print(f" Eval samples: {len(eval_dataset)}")
|
|
|
|
| 169 |
weight_decay=training_config["weight_decay"],
|
| 170 |
max_grad_norm=training_config["max_grad_norm"],
|
| 171 |
fp16=training_config.get("fp16", True),
|
| 172 |
+
bf16=training_config.get("bf16", False), # Use config setting
|
| 173 |
gradient_checkpointing=training_config.get("gradient_checkpointing", True),
|
| 174 |
logging_steps=training_config["logging_steps"],
|
| 175 |
eval_strategy="steps",
|
|
|
|
| 237 |
eval_dir = data_config["eval_dir"]
|
| 238 |
|
| 239 |
# Check if it's a local disk dataset (saved with save_to_disk)
|
| 240 |
+
# save_to_disk creates dataset_info.json
|
| 241 |
+
if Path(train_dir).exists() and (Path(train_dir) / "dataset_info.json").exists():
|
| 242 |
from datasets import load_from_disk
|
| 243 |
train_dataset = load_from_disk(train_dir)
|
| 244 |
eval_dataset = load_from_disk(eval_dir)
|
| 245 |
+
print(f" Loaded pre-processed datasets from disk")
|
| 246 |
else:
|
| 247 |
+
# Try loading as JSONL or other format
|
| 248 |
train_dataset = load_dataset(train_dir)
|
| 249 |
eval_dataset = load_dataset(eval_dir)
|
| 250 |
+
print(f" Loaded datasets from: {train_dir}, {eval_dir}")
|
| 251 |
|
| 252 |
print(f" Train samples: {len(train_dataset)}")
|
| 253 |
print(f" Eval samples: {len(eval_dataset)}")
|
stack_cli/tools.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
| 1 |
#!/usr/bin/env python3
|
| 2 |
"""
|
| 3 |
Stack 2.9 - Built-in Tools Module
|
| 4 |
-
|
| 5 |
"""
|
| 6 |
|
| 7 |
import os
|
|
|
|
| 1 |
#!/usr/bin/env python3
|
| 2 |
"""
|
| 3 |
Stack 2.9 - Built-in Tools Module
|
| 4 |
+
38 powerful tools for file operations, git, code execution, web, memory, and planning.
|
| 5 |
"""
|
| 6 |
|
| 7 |
import os
|
test-results/test_results.xml
DELETED
|
@@ -1,112 +0,0 @@
|
|
| 1 |
-
<?xml version="1.0" encoding="utf-8"?><testsuites name="pytest tests"><testsuite name="pytest" errors="0" failures="29" skipped="0" tests="255" time="20.395" timestamp="2026-04-01T22:46:44.570037+02:00" hostname="192.168.1.8"><testcase classname="benchmarks.test_latency.TestQueryLatency" name="test_simple_query_latency" time="0.006" /><testcase classname="benchmarks.test_latency.TestQueryLatency" name="test_file_read_latency" time="0.002" /><testcase classname="benchmarks.test_latency.TestQueryLatency" name="test_git_operation_latency" time="2.506"><failure message="assert 2.5047659873962402 < 1.0">tests/benchmarks/test_latency.py:66: in test_git_operation_latency
|
| 2 |
-
assert elapsed < 1.0
|
| 3 |
-
E assert 2.5047659873962402 < 1.0</failure></testcase><testcase classname="benchmarks.test_latency.TestToolLatency" name="test_get_tool_latency" time="0.000" /><testcase classname="benchmarks.test_latency.TestToolLatency" name="test_list_tools_latency" time="0.000" /><testcase classname="benchmarks.test_latency.TestToolLatency" name="test_schemas_lookup_latency" time="0.002" /><testcase classname="benchmarks.test_latency.TestContextLatency" name="test_context_summary_latency" time="0.002" /><testcase classname="benchmarks.test_latency.TestContextLatency" name="test_workspace_context_latency" time="0.001" /><testcase classname="benchmarks.test_latency.TestAgentLatency" name="test_intent_parsing_latency" time="0.001" /><testcase classname="benchmarks.test_latency.TestAgentLatency" name="test_tool_selection_latency" time="0.010" /><testcase classname="benchmarks.test_latency.TestMemoryLatency" name="test_session_memory_latency" time="0.002" /><testcase classname="benchmarks.test_latency.TestMemoryLatency" name="test_summary_generation_latency" time="0.002" /><testcase classname="benchmarks.test_latency.TestOverallLatency" name="test_full_query_cycle_latency" time="0.013" /><testcase classname="benchmarks.test_latency.TestOverallLatency" name="test_batch_query_latency" time="0.062" /><testcase classname="benchmarks.test_memory_usage.TestMemoryUsage" name="test_agent_memory_baseline" time="0.001" /><testcase classname="benchmarks.test_memory_usage.TestMemoryUsage" name="test_conversation_history_memory" time="0.125" /><testcase classname="benchmarks.test_memory_usage.TestMemoryUsage" name="test_session_memory_growth" time="0.001" /><testcase classname="benchmarks.test_memory_usage.TestContextMemory" name="test_context_manager_memory" time="0.001" /><testcase classname="benchmarks.test_memory_usage.TestContextMemory" name="test_projects_dict_memory" time="0.001" /><testcase classname="benchmarks.test_memory_usage.TestToolMemory" name="test_tools_dict_memory" time="0.000" /><testcase classname="benchmarks.test_memory_usage.TestToolMemory" name="test_tool_schemas_memory" time="0.000" /><testcase classname="benchmarks.test_memory_usage.TestGarbageCollection" name="test_gc_after_agent_creation" time="0.045" /><testcase classname="benchmarks.test_memory_usage.TestGarbageCollection" name="test_gc_after_queries" time="0.099" /><testcase classname="benchmarks.test_memory_usage.TestMemoryLeaks" name="test_no_leak_in_loop" time="0.249" /><testcase classname="benchmarks.test_memory_usage.TestMemoryLeaks" name="test_session_cleanup" time="0.001" /><testcase classname="benchmarks.test_memory_usage.TestMemoryEfficiency" name="test_response_size" time="0.000" /><testcase classname="benchmarks.test_memory_usage.TestMemoryEfficiency" name="test_tool_call_size" time="0.000" /><testcase classname="benchmarks.test_memory_usage.TestResourceCleanup" name="test_context_cleanup" time="0.007" /><testcase classname="benchmarks.test_memory_usage.TestResourceCleanup" name="test_agent_disposal" time="0.001" /><testcase classname="benchmarks.test_throughput.TestConcurrentQueries" name="test_sequential_throughput" time="0.031" /><testcase classname="benchmarks.test_throughput.TestConcurrentQueries" name="test_rapid_fire_queries" time="0.017" /><testcase classname="benchmarks.test_throughput.TestThreadSafety" name="test_concurrent_agent_creation" time="0.008" /><testcase classname="benchmarks.test_throughput.TestThreadSafety" name="test_concurrent_tool_access" time="0.003" /><testcase classname="benchmarks.test_throughput.TestBatchProcessing" name="test_batch_file_operations" time="0.001"><failure message="NameError: name 'get_tool' is not defined">tests/benchmarks/test_throughput.py:129: in test_batch_file_operations
|
| 4 |
-
get_tool("read")(path=f)
|
| 5 |
-
^^^^^^^^
|
| 6 |
-
E NameError: name 'get_tool' is not defined</failure></testcase><testcase classname="benchmarks.test_throughput.TestBatchProcessing" name="test_batch_tool_chains" time="0.001"><failure message="NameError: name 'get_tool' is not defined">tests/benchmarks/test_throughput.py:151: in test_batch_tool_chains
|
| 7 |
-
get_tool(tool_name)(**params)
|
| 8 |
-
^^^^^^^^
|
| 9 |
-
E NameError: name 'get_tool' is not defined</failure></testcase><testcase classname="benchmarks.test_throughput.TestThroughputMetrics" name="test_queries_per_second" time="0.046" /><testcase classname="benchmarks.test_throughput.TestThroughputMetrics" name="test_tools_per_second" time="0.001"><failure message="NameError: name 'get_tool' is not defined">tests/benchmarks/test_throughput.py:193: in test_tools_per_second
|
| 10 |
-
get_tool("read")(path=f"file{i}.py")
|
| 11 |
-
^^^^^^^^
|
| 12 |
-
E NameError: name 'get_tool' is not defined</failure></testcase><testcase classname="benchmarks.test_throughput.TestConcurrentContext" name="test_concurrent_context_updates" time="0.001" /><testcase classname="benchmarks.test_throughput.TestResourceUtilization" name="test_memory_usage_stable" time="0.132" /><testcase classname="benchmarks.test_throughput.TestResourceUtilization" name="test_context_growth_bounded" time="0.001" /><testcase classname="benchmarks.test_token_efficiency.TestTokenUsage" name="test_response_token_efficiency" time="0.002" /><testcase classname="benchmarks.test_token_efficiency.TestTokenUsage" name="test_context_truncation" time="0.001" /><testcase classname="benchmarks.test_token_efficiency.TestPromptEfficiency" name="test_intent_parsing_tokens" time="0.000" /><testcase classname="benchmarks.test_token_efficiency.TestPromptEfficiency" name="test_tool_selection_tokens" time="0.000"><failure message="TypeError: isinstance expected 2 arguments, got 1">tests/benchmarks/test_token_efficiency.py:78: in test_tool_selection_tokens
|
| 13 |
-
assert isinstance(tools)
|
| 14 |
-
^^^^^^^^^^^^^^^^^
|
| 15 |
-
E TypeError: isinstance expected 2 arguments, got 1</failure></testcase><testcase classname="benchmarks.test_token_efficiency.TestResponseEfficiency" name="test_response_generation_size" time="0.001" /><testcase classname="benchmarks.test_token_efficiency.TestResponseEfficiency" name="test_clarification_efficiency" time="0.001" /><testcase classname="benchmarks.test_token_efficiency.TestContextTokenEfficiency" name="test_context_summary_size" time="0.001" /><testcase classname="benchmarks.test_token_efficiency.TestContextTokenEfficiency" name="test_workspace_context_size" time="0.001" /><testcase classname="benchmarks.test_token_efficiency.TestToolSchemasEfficiency" name="test_schemas_compactness" time="0.000" /><testcase classname="benchmarks.test_token_efficiency.TestToolSchemasEfficiency" name="test_schema_required_fields" time="0.000" /><testcase classname="benchmarks.test_token_efficiency.TestConversationEfficiency" name="test_history_truncation" time="0.065" /><testcase classname="benchmarks.test_token_efficiency.TestConversationEfficiency" name="test_summary_efficiency" time="0.001" /><testcase classname="benchmarks.test_token_efficiency.TestTokenOptimization" name="test_response_capping" time="0.000" /><testcase classname="benchmarks.test_token_efficiency.TestTokenOptimization" name="test_context_truncation_strategy" time="0.001" /><testcase classname="integration.test_agent_cli.TestAgentCLIIntegration" name="test_agent_in_cli" time="0.001" /><testcase classname="integration.test_agent_cli.TestAgentCLIIntegration" name="test_chat_mode_uses_agent" time="0.001" /><testcase classname="integration.test_agent_cli.TestAgentCLIIntegration" name="test_command_mode_uses_agent" time="0.001" /><testcase classname="integration.test_agent_cli.TestAgentContextIntegration" name="test_agent_uses_context_manager" time="0.001" /><testcase classname="integration.test_agent_cli.TestAgentContextIntegration" name="test_agent_records_tool_usage" time="0.002" /><testcase classname="integration.test_agent_cli.TestAgentContextIntegration" name="test_agent_gets_context" time="0.001" /><testcase classname="integration.test_agent_cli.TestAgentToolsIntegration" name="test_agent_gets_schemas" time="0.001" /><testcase classname="integration.test_agent_cli.TestAgentToolsIntegration" name="test_agent_process_with_forced_tools" time="0.001" /><testcase classname="integration.test_agent_cli.TestFullWorkflow" name="test_read_file_workflow" time="0.002" /><testcase classname="integration.test_agent_cli.TestFullWorkflow" name="test_write_file_workflow" time="0.002" /><testcase classname="integration.test_agent_cli.TestFullWorkflow" name="test_git_workflow" time="2.890" /><testcase classname="integration.test_agent_cli.TestMultiToolIntegration" name="test_read_then_process" time="0.004" /><testcase classname="integration.test_agent_cli.TestMultiToolIntegration" name="test_multiple_git_operations" time="2.324" /><testcase classname="integration.test_agent_cli.TestErrorIntegration" name="test_tool_failure_handling" time="0.004" /><testcase classname="integration.test_agent_cli.TestErrorIntegration" name="test_context_error_recovery" time="0.001" /><testcase classname="integration.test_cli.TestCLIComponents" name="test_print_banner" time="0.001" /><testcase classname="integration.test_cli.TestCLIComponents" name="test_cli_creation" time="0.001" /><testcase classname="integration.test_cli.TestCLIComponents" name="test_chat_mode_creation" time="0.000" /><testcase classname="integration.test_cli.TestCLIComponents" name="test_command_mode_creation" time="0.001" /><testcase classname="integration.test_cli.TestCLIComponents" name="test_voice_interface_creation" time="0.000" /><testcase classname="integration.test_cli.TestCLIWorkflows" name="test_run_interactive" time="0.000" /><testcase classname="integration.test_cli.TestCLIWorkflows" name="test_run_command" time="0.001" /><testcase classname="integration.test_cli.TestCLIWorkflows" name="test_run_tools" time="0.001" /><testcase classname="integration.test_cli.TestCLIArguments" name="test_cli_with_command_arg" time="0.000" /><testcase classname="integration.test_cli.TestCLIArguments" name="test_cli_with_tools_arg" time="0.000" /><testcase classname="integration.test_cli.TestCLIArguments" name="test_cli_with_output_arg" time="0.000" /><testcase classname="integration.test_cli.TestCLIArguments" name="test_cli_with_format_arg" time="0.000" /><testcase classname="integration.test_cli.TestOutputFormatting" name="test_format_text_output" time="0.000" /><testcase classname="integration.test_cli.TestOutputFormatting" name="test_format_json_output" time="0.000" /><testcase classname="integration.test_cli.TestOutputFormatting" name="test_format_list_output" time="0.000" /><testcase classname="integration.test_cli.TestCLIColors" name="test_colored_output_red" time="0.000" /><testcase classname="integration.test_cli.TestCLIColors" name="test_colored_output_green" time="0.000" /><testcase classname="integration.test_cli.TestCLIColors" name="test_colored_output_cyan" time="0.000" /><testcase classname="integration.test_cli.TestMainFunction" name="test_main_defaults" time="0.000" /><testcase classname="integration.test_cli.TestMainFunction" name="test_main_with_command" time="0.000" /><testcase classname="integration.test_cli.TestCLIErrors" name="test_cli_keyboard_interrupt" time="0.000" /><testcase classname="integration.test_cli.TestCLIErrors" name="test_cli_general_exception" time="0.000" /><testcase classname="integration.test_self_evolution.TestSelfReflection" name="test_reflection_high_confidence" time="0.000" /><testcase classname="integration.test_self_evolution.TestSelfReflection" name="test_reflection_low_confidence" time="0.000" /><testcase classname="integration.test_self_evolution.TestSelfReflection" name="test_reflection_suggestion" time="0.000" /><testcase classname="integration.test_self_evolution.TestSelfImprovementCycle" name="test_agent_learns_from_errors" time="0.002" /><testcase classname="integration.test_self_evolution.TestSelfImprovementCycle" name="test_conversation_history_tracking" time="0.005" /><testcase classname="integration.test_self_evolution.TestSelfImprovementCycle" name="test_reflection_updates_confidence" time="1.650" /><testcase classname="integration.test_self_evolution.TestAdaptiveToolSelection" name="test_tool_selection_based_on_intent" time="0.000" /><testcase classname="integration.test_self_evolution.TestAdaptiveToolSelection" name="test_parameter_extraction_improves" time="0.000" /><testcase classname="integration.test_self_evolution.TestContextAwareImprovement" name="test_context_influences_response" time="0.004" /><testcase classname="integration.test_self_evolution.TestContextAwareImprovement" name="test_session_memory_persists" time="0.002" /><testcase classname="integration.test_self_evolution.TestSelfEvolutionIntegration" name="test_full_self_improvement_loop" time="0.848" /><testcase classname="integration.test_self_evolution.TestSelfEvolutionIntegration" name="test_error_recovery_improves" time="0.006" /><testcase classname="integration.test_self_evolution.TestSelfEvolutionIntegration" name="test_performance_tracking" time="0.001" /><testcase classname="integration.test_self_evolution.TestContinuousLearning" name="test_query_patterns_learned" time="0.006" /><testcase classname="integration.test_self_evolution.TestContinuousLearning" name="test_tool_usage_stats" time="2.824" /><testcase classname="integration.test_tool_chains.TestToolChains" name="test_read_grep_chain" time="0.002"><failure message="assert False is True">tests/integration/test_tool_chains.py:40: in test_read_grep_chain
|
| 16 |
-
assert read_result["success"] is True
|
| 17 |
-
E assert False is True</failure></testcase><testcase classname="integration.test_tool_chains.TestToolChains" name="test_search_copy_chain" time="0.050"><failure message="assert False is True">tests/integration/test_tool_chains.py:58: in test_search_copy_chain
|
| 18 |
-
assert copy_result["success"] is True
|
| 19 |
-
E assert False is True</failure></testcase><testcase classname="integration.test_tool_chains.TestToolChains" name="test_git_status_branch_chain" time="0.033" /><testcase classname="integration.test_tool_chains.TestComplexToolSequences" name="test_file_edit_save_sequence" time="0.001"><failure message="assert False is True">tests/integration/test_tool_chains.py:107: in test_file_edit_save_sequence
|
| 20 |
-
assert edit_result["success"] is True
|
| 21 |
-
E assert False is True</failure></testcase><testcase classname="integration.test_tool_chains.TestComplexToolSequences" name="test_code_test_lint_sequence" time="0.763"><failure message="AssertionError: assert (False or 'output' in {'error': 'ruff not found', 'success': False}) + where False = <built-in method get of dict object at 0x10b9a9000>('success') + where <built-in method get of dict object at 0x10b9a9000> = {'error': 'ruff not found', 'success': False}.get">tests/integration/test_tool_chains.py:125: in test_code_test_lint_sequence
|
| 22 |
-
assert lint_result.get("success") or "output" in lint_result
|
| 23 |
-
E AssertionError: assert (False or 'output' in {'error': 'ruff not found', 'success': False})
|
| 24 |
-
E + where False = <built-in method get of dict object at 0x10b9a9000>('success')
|
| 25 |
-
E + where <built-in method get of dict object at 0x10b9a9000> = {'error': 'ruff not found', 'success': False}.get</failure></testcase><testcase classname="integration.test_tool_chains.TestComplexToolSequences" name="test_project_scan_context_sequence" time="0.003" /><testcase classname="integration.test_tool_chains.TestToolChainErrors" name="test_chain_continues_on_error" time="0.001"><failure message="assert 0 == 2">tests/integration/test_tool_chains.py:174: in test_chain_continues_on_error
|
| 26 |
-
assert call_count[0] == 2
|
| 27 |
-
E assert 0 == 2</failure></testcase><testcase classname="integration.test_tool_chains.TestToolChainErrors" name="test_rollback_on_error" time="0.001" /><testcase classname="integration.test_tool_chains.TestParallelToolExecution" name="test_parallel_file_operations" time="0.000"><failure message="assert False is True">tests/integration/test_tool_chains.py:214: in test_parallel_file_operations
|
| 28 |
-
assert result1["success"] is True
|
| 29 |
-
E assert False is True</failure></testcase><testcase classname="integration.test_tool_chains.TestToolDependencyResolution" name="test_git_needs_repo" time="0.017" /><testcase classname="integration.test_tool_chains.TestToolDependencyResolution" name="test_edit_needs_file" time="0.000" /><testcase classname="integration.test_tool_chains.TestToolDependencyResolution" name="test_memory_needs_workspace" time="0.000" /><testcase classname="integration.test_tool_chains.TestToolChainsPerformance" name="test_rapid_tool_calls" time="0.001" /><testcase classname="integration.test_tool_chains.TestToolChainsPerformance" name="test_memory_efficiency" time="4.768" /><testcase classname="unit.test_agent.TestQueryIntent" name="test_intent_values" time="0.001" /><testcase classname="unit.test_agent.TestToolCall" name="test_tool_call_creation" time="0.000" /><testcase classname="unit.test_agent.TestToolCall" name="test_tool_call_with_error" time="0.000" /><testcase classname="unit.test_agent.TestAgentResponse" name="test_agent_response_creation" time="0.000" /><testcase classname="unit.test_agent.TestQueryUnderstanding" name="test_parse_file_read" time="0.000" /><testcase classname="unit.test_agent.TestQueryUnderstanding" name="test_parse_file_write" time="0.000" /><testcase classname="unit.test_agent.TestQueryUnderstanding" name="test_parse_git_operation" time="0.000" /><testcase classname="unit.test_agent.TestQueryUnderstanding" name="test_parse_web_search" time="0.000" /><testcase classname="unit.test_agent.TestQueryUnderstanding" name="test_parse_general" time="0.000" /><testcase classname="unit.test_agent.TestQueryUnderstanding" name="test_parse_case_insensitive" time="0.000" /><testcase classname="unit.test_agent.TestQueryUnderstanding" name="test_extract_file_path" time="0.000" /><testcase classname="unit.test_agent.TestToolSelector" name="test_select_file_read_tools" time="0.000" /><testcase classname="unit.test_agent.TestToolSelector" name="test_select_git_tools" time="0.000" /><testcase classname="unit.test_agent.TestToolSelector" name="test_select_web_search_tools" time="0.000" /><testcase classname="unit.test_agent.TestToolSelector" name="test_select_general_tools" time="0.000" /><testcase classname="unit.test_agent.TestToolSelector" name="test_get_tool_parameters_read" time="0.000" /><testcase classname="unit.test_agent.TestToolSelector" name="test_get_tool_parameters_git_commit" time="0.000" /><testcase classname="unit.test_agent.TestResponseGenerator" name="test_generate_empty" time="0.001" /><testcase classname="unit.test_agent.TestResponseGenerator" name="test_generate_with_success" time="0.001"><failure message="assert 'read' in "here's the content:\n```\ntest content\n```" + where "here's the content:\n```\ntest content\n```" = <built-in method lower of str object at 0x10b9d30f0>() + where <built-in method lower of str object at 0x10b9d30f0> = "Here's the content:\n```\ntest content\n```".lower">tests/unit/test_agent.py:223: in test_generate_with_success
|
| 30 |
-
assert "read" in result.lower()
|
| 31 |
-
E assert 'read' in "here's the content:\n```\ntest content\n```"
|
| 32 |
-
E + where "here's the content:\n```\ntest content\n```" = <built-in method lower of str object at 0x10b9d30f0>()
|
| 33 |
-
E + where <built-in method lower of str object at 0x10b9d30f0> = "Here's the content:\n```\ntest content\n```".lower</failure></testcase><testcase classname="unit.test_agent.TestResponseGenerator" name="test_generate_with_error" time="0.000" /><testcase classname="unit.test_agent.TestResponseGenerator" name="test_generate_clarification" time="0.000" /><testcase classname="unit.test_agent.TestSelfReflection" name="test_reflect_high_confidence" time="0.000" /><testcase classname="unit.test_agent.TestSelfReflection" name="test_reflect_low_confidence" time="0.000" /><testcase classname="unit.test_agent.TestSelfReflection" name="test_reflect_empty_response" time="0.000" /><testcase classname="unit.test_agent.TestStackAgent" name="test_agent_creation" time="0.001" /><testcase classname="unit.test_agent.TestStackAgent" name="test_process_simple_query" time="0.003" /><testcase classname="unit.test_agent.TestStackAgent" name="test_process_with_tools" time="0.001" /><testcase classname="unit.test_agent.TestStackAgent" name="test_get_context" time="0.001" /><testcase classname="unit.test_agent.TestStackAgent" name="test_get_schemas" time="0.001" /><testcase classname="unit.test_agent.TestCreateAgent" name="test_create_agent_default" time="0.001" /><testcase classname="unit.test_agent.TestCreateAgent" name="test_create_agent_custom_workspace" time="0.001"><failure message="FileNotFoundError: [Errno 2] No such file or directory: '/custom/path'">tests/unit/test_agent.py:360: in test_create_agent_custom_workspace
|
| 34 |
-
agent = create_agent("/custom/path")
|
| 35 |
-
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 36 |
-
stack_cli/agent.py:511: in create_agent
|
| 37 |
-
return StackAgent(workspace)
|
| 38 |
-
^^^^^^^^^^^^^^^^^^^^^
|
| 39 |
-
stack_cli/agent.py:393: in __init__
|
| 40 |
-
self.context_manager = create_context_manager(workspace)
|
| 41 |
-
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 42 |
-
stack_cli/context.py:337: in create_context_manager
|
| 43 |
-
return ContextManager(workspace or "/Users/walidsobhi/.openclaw/workspace")
|
| 44 |
-
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 45 |
-
stack_cli/context.py:100: in __init__
|
| 46 |
-
self._load_context()
|
| 47 |
-
stack_cli/context.py:120: in _load_context
|
| 48 |
-
self._scan_projects()
|
| 49 |
-
stack_cli/context.py:124: in _scan_projects
|
| 50 |
-
for item in self.workspace.iterdir():
|
| 51 |
-
^^^^^^^^^^^^^^^^^^^^^^^^
|
| 52 |
-
/opt/homebrew/Cellar/python@3.14/3.14.3_1/Frameworks/Python.framework/Versions/3.14/lib/python3.14/pathlib/__init__.py:836: in iterdir
|
| 53 |
-
with os.scandir(root_dir) as scandir_it:
|
| 54 |
-
^^^^^^^^^^^^^^^^^^^^
|
| 55 |
-
E FileNotFoundError: [Errno 2] No such file or directory: '/custom/path'</failure></testcase><testcase classname="unit.test_config.TestConfiguration" name="test_default_workspace_path" time="0.001" /><testcase classname="unit.test_config.TestConfiguration" name="test_custom_workspace_path" time="0.002"><failure message="assert "<MagicMock n...'4489912608'>" == '/custom/path' - /custom/path + <MagicMock name='Path()' id='4489912608'>">tests/unit/test_config.py:38: in test_custom_workspace_path
|
| 56 |
-
assert str(cm.workspace) == "/custom/path"
|
| 57 |
-
E assert "<MagicMock n...'4489912608'>" == '/custom/path'
|
| 58 |
-
E
|
| 59 |
-
E - /custom/path
|
| 60 |
-
E + <MagicMock name='Path()' id='4489912608'></failure></testcase><testcase classname="unit.test_config.TestEnvironmentVariables" name="test_workspace_from_env" time="0.001" /><testcase classname="unit.test_config.TestToolConfiguration" name="test_tool_timeout_defaults" time="0.000" /><testcase classname="unit.test_config.TestToolConfiguration" name="test_git_command_timeout" time="0.000" /><testcase classname="unit.test_config.TestLoggingConfiguration" name="test_logging_setup" time="0.000" /><testcase classname="unit.test_context.TestContextManagerBasics" name="test_init_with_workspace" time="0.002"><failure message="AssertionError: assert <MagicMock name='Path()' id='4491019664'> == PosixPath('/custom/workspace') + where <MagicMock name='Path()' id='4491019664'> = <stack_cli.context.ContextManager object at 0x10bb95ef0>.workspace + and PosixPath('/custom/workspace') = Path('/custom/workspace')">tests/unit/test_context.py:31: in test_init_with_workspace
|
| 61 |
-
assert cm.workspace == Path("/custom/workspace")
|
| 62 |
-
E AssertionError: assert <MagicMock name='Path()' id='4491019664'> == PosixPath('/custom/workspace')
|
| 63 |
-
E + where <MagicMock name='Path()' id='4491019664'> = <stack_cli.context.ContextManager object at 0x10bb95ef0>.workspace
|
| 64 |
-
E + and PosixPath('/custom/workspace') = Path('/custom/workspace')</failure></testcase><testcase classname="unit.test_context.TestContextManagerBasics" name="test_init_loads_context" time="0.001" /><testcase classname="unit.test_context.TestContextManagerBasics" name="test_session_attribute" time="0.001" /><testcase classname="unit.test_context.TestContextManagerProjects" name="test_projects_dict_exists" time="0.001" /><testcase classname="unit.test_context.TestContextManagerProjects" name="test_current_project_initially_none" time="0.001" /><testcase classname="unit.test_context.TestContextManagerMethods" name="test_get_context_summary_returns_dict" time="0.001" /><testcase classname="unit.test_context.TestContextManagerMethods" name="test_get_workspace_context_returns_string" time="0.001" /><testcase classname="unit.test_context.TestContextManagerMethods" name="test_search_memory_returns_list" time="0.002" /><testcase classname="unit.test_context.TestContextManagerMethods" name="test_save_to_memory" time="0.002" /><testcase classname="unit.test_context.TestContextManagerProjectLoading" name="test_load_project_not_exists" time="0.002"><failure message="assert ProjectContext(name='nonexistent', path="<MagicMock name='Path().__truediv__()' id='4493889888'>", language='javascript', framework=None, files=[], dirs=[], has_git=<MagicMock name='Path().__truediv__().__truediv__().exists()' id='4493894256'>, dependencies=[], entry_points=[], metadata={}) is None">tests/unit/test_context.py:135: in test_load_project_not_exists
|
| 65 |
-
assert result is None
|
| 66 |
-
E assert ProjectContext(name='nonexistent', path="<MagicMock name='Path().__truediv__()' id='4493889888'>", language='javascript', framework=None, files=[], dirs=[], has_git=<MagicMock name='Path().__truediv__().__truediv__().exists()' id='4493894256'>, dependencies=[], entry_points=[], metadata={}) is None</failure></testcase><testcase classname="unit.test_context.TestContextManagerProjectLoading" name="test_load_project_exists" time="0.003" /><testcase classname="unit.test_context.TestContextManagerRecentContext" name="test_get_recent_context" time="0.002" /><testcase classname="unit.test_memory.TestProjectContext" name="test_project_context_creation" time="0.000" /><testcase classname="unit.test_memory.TestProjectContext" name="test_project_context_defaults" time="0.000" /><testcase classname="unit.test_memory.TestSessionMemory" name="test_session_memory_creation" time="0.000" /><testcase classname="unit.test_memory.TestSessionMemory" name="test_add_message" time="0.000" /><testcase classname="unit.test_memory.TestSessionMemory" name="test_add_tool_usage" time="0.000" /><testcase classname="unit.test_memory.TestSessionMemory" name="test_add_file_touched" time="0.000" /><testcase classname="unit.test_memory.TestSessionMemory" name="test_add_command" time="0.000" /><testcase classname="unit.test_memory.TestSessionMemory" name="test_get_summary" time="0.000" /><testcase classname="unit.test_memory.TestContextManager" name="test_context_manager_creation" time="0.001"><failure message="AssertionError: assert <MagicMock name='Path()' id='4492458176'> == PosixPath('/tmp/test') + where <MagicMock name='Path()' id='4492458176'> = <stack_cli.context.ContextManager object at 0x10b5bb850>.workspace + and PosixPath('/tmp/test') = Path('/tmp/test')">tests/unit/test_memory.py:121: in test_context_manager_creation
|
| 67 |
-
assert cm.workspace == Path("/tmp/test")
|
| 68 |
-
E AssertionError: assert <MagicMock name='Path()' id='4492458176'> == PosixPath('/tmp/test')
|
| 69 |
-
E + where <MagicMock name='Path()' id='4492458176'> = <stack_cli.context.ContextManager object at 0x10b5bb850>.workspace
|
| 70 |
-
E + and PosixPath('/tmp/test') = Path('/tmp/test')</failure></testcase><testcase classname="unit.test_memory.TestContextManager" name="test_load_context" time="0.001" /><testcase classname="unit.test_memory.TestContextManager" name="test_get_context_summary" time="0.001" /><testcase classname="unit.test_memory.TestContextManager" name="test_get_workspace_context" time="0.001" /><testcase classname="unit.test_memory.TestContextManager" name="test_search_memory" time="0.001"><failure message="FileNotFoundError: [Errno 2] No such file or directory: '/tmp/test'">tests/unit/test_memory.py:170: in test_search_memory
|
| 71 |
-
cm = ContextManager("/tmp/test")
|
| 72 |
-
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 73 |
-
stack_cli/context.py:100: in __init__
|
| 74 |
-
self._load_context()
|
| 75 |
-
stack_cli/context.py:120: in _load_context
|
| 76 |
-
self._scan_projects()
|
| 77 |
-
stack_cli/context.py:124: in _scan_projects
|
| 78 |
-
for item in self.workspace.iterdir():
|
| 79 |
-
^^^^^^^^^^^^^^^^^^^^^^^^
|
| 80 |
-
/opt/homebrew/Cellar/python@3.14/3.14.3_1/Frameworks/Python.framework/Versions/3.14/lib/python3.14/pathlib/__init__.py:836: in iterdir
|
| 81 |
-
with os.scandir(root_dir) as scandir_it:
|
| 82 |
-
^^^^^^^^^^^^^^^^^^^^
|
| 83 |
-
E FileNotFoundError: [Errno 2] No such file or directory: '/tmp/test'</failure></testcase><testcase classname="unit.test_memory.TestContextManager" name="test_save_to_memory" time="0.001"><failure message="NameError: name 'mock_open' is not defined">tests/unit/test_memory.py:179: in test_save_to_memory
|
| 84 |
-
with patch('pathlib.Path.open', mock_open(read_data="")):
|
| 85 |
-
^^^^^^^^^
|
| 86 |
-
E NameError: name 'mock_open' is not defined</failure></testcase><testcase classname="unit.test_memory.TestProjectAware" name="test_project_aware_creation" time="0.001" /><testcase classname="unit.test_memory.TestProjectAware" name="test_detect_project" time="0.001" /><testcase classname="unit.test_memory.TestProjectAware" name="test_get_project_context" time="0.001" /><testcase classname="unit.test_memory.TestProjectAware" name="test_format_context_for_prompt" time="0.001" /><testcase classname="unit.test_memory.TestCreateContextManager" name="test_create_context_manager_default" time="0.000" /><testcase classname="unit.test_memory.TestCreateContextManager" name="test_create_context_manager_custom" time="0.000" /><testcase classname="unit.test_tools.TestToolsRegistry" name="test_tools_count" time="0.000" /><testcase classname="unit.test_tools.TestToolsRegistry" name="test_get_tool_valid" time="0.000" /><testcase classname="unit.test_tools.TestToolsRegistry" name="test_get_tool_invalid" time="0.000" /><testcase classname="unit.test_tools.TestToolsRegistry" name="test_get_tool_schemas" time="0.000" /><testcase classname="unit.test_tools.TestFileOperations" name="test_read_file_success" time="0.002" /><testcase classname="unit.test_tools.TestFileOperations" name="test_read_file_not_found" time="0.000" /><testcase classname="unit.test_tools.TestFileOperations" name="test_read_file_with_limit" time="0.001" /><testcase classname="unit.test_tools.TestFileOperations" name="test_write_file_success" time="0.001" /><testcase classname="unit.test_tools.TestFileOperations" name="test_write_file_creates_dirs" time="0.002" /><testcase classname="unit.test_tools.TestFileOperations" name="test_edit_file_success" time="0.001" /><testcase classname="unit.test_tools.TestFileOperations" name="test_edit_file_not_found" time="0.000" /><testcase classname="unit.test_tools.TestFileOperations" name="test_edit_file_text_not_found" time="0.001" /><testcase classname="unit.test_tools.TestFileOperations" name="test_search_files" time="0.001" /><testcase classname="unit.test_tools.TestFileOperations" name="test_grep_basic" time="0.001" /><testcase classname="unit.test_tools.TestFileOperations" name="test_grep_with_context" time="0.001" /><testcase classname="unit.test_tools.TestFileOperations" name="test_copy_file" time="0.001" /><testcase classname="unit.test_tools.TestFileOperations" name="test_move_file" time="0.001" /><testcase classname="unit.test_tools.TestFileOperations" name="test_delete_file_without_force" time="0.001" /><testcase classname="unit.test_tools.TestFileOperations" name="test_delete_file_with_force" time="0.001" /><testcase classname="unit.test_tools.TestGitOperations" name="test_git_status_no_repo" time="0.020"><failure message="assert True is False">tests/unit/test_tools.py:213: in test_git_status_no_repo
|
| 87 |
-
assert result["success"] is False
|
| 88 |
-
E assert True is False</failure></testcase><testcase classname="unit.test_tools.TestGitOperations" name="test_git_status_success" time="0.079" /><testcase classname="unit.test_tools.TestGitOperations" name="test_git_commit" time="0.111" /><testcase classname="unit.test_tools.TestGitOperations" name="test_git_push" time="0.003"><failure message="NameError: name 'temp_git_repo' is not defined">tests/unit/test_tools.py:246: in test_git_push
|
| 89 |
-
result = tool_git_push(str(temp_git_repo))
|
| 90 |
-
^^^^^^^^^^^^^
|
| 91 |
-
E NameError: name 'temp_git_repo' is not defined</failure></testcase><testcase classname="unit.test_tools.TestGitOperations" name="test_git_pull" time="0.001"><failure message="NameError: name 'temp_git_repo' is not defined">tests/unit/test_tools.py:257: in test_git_pull
|
| 92 |
-
result = tool_git_pull(str(temp_git_repo))
|
| 93 |
-
^^^^^^^^^^^^^
|
| 94 |
-
E NameError: name 'temp_git_repo' is not defined</failure></testcase><testcase classname="unit.test_tools.TestGitOperations" name="test_git_branch_list" time="0.001"><failure message="NameError: name 'temp_git_repo' is not defined">tests/unit/test_tools.py:268: in test_git_branch_list
|
| 95 |
-
result = tool_git_branch(str(temp_git_repo))
|
| 96 |
-
^^^^^^^^^^^^^
|
| 97 |
-
E NameError: name 'temp_git_repo' is not defined</failure></testcase><testcase classname="unit.test_tools.TestGitOperations" name="test_git_log" time="0.001"><failure message="NameError: name 'temp_git_repo' is not defined">tests/unit/test_tools.py:280: in test_git_log
|
| 98 |
-
result = tool_git_log(str(temp_git_repo))
|
| 99 |
-
^^^^^^^^^^^^^
|
| 100 |
-
E NameError: name 'temp_git_repo' is not defined</failure></testcase><testcase classname="unit.test_tools.TestGitOperations" name="test_git_diff" time="0.001"><failure message="NameError: name 'temp_git_repo' is not defined">tests/unit/test_tools.py:291: in test_git_diff
|
| 101 |
-
result = tool_git_diff(str(temp_git_repo))
|
| 102 |
-
^^^^^^^^^^^^^
|
| 103 |
-
E NameError: name 'temp_git_repo' is not defined</failure></testcase><testcase classname="unit.test_tools.TestCodeExecution" name="test_run_command_success" time="0.001" /><testcase classname="unit.test_tools.TestCodeExecution" name="test_run_command_failure" time="0.001" /><testcase classname="unit.test_tools.TestCodeExecution" name="test_run_command_timeout" time="0.001"><failure message="NameError: name 'subprocess' is not defined. Did you forget to import 'subprocess'?">tests/unit/test_tools.py:329: in test_run_command_timeout
|
| 104 |
-
mock_run.side_effect = subprocess.TimeoutExpired("cmd", 1)
|
| 105 |
-
^^^^^^^^^^
|
| 106 |
-
E NameError: name 'subprocess' is not defined. Did you forget to import 'subprocess'?</failure></testcase><testcase classname="unit.test_tools.TestCodeExecution" name="test_run_tests" time="0.001" /><testcase classname="unit.test_tools.TestCodeExecution" name="test_lint_code" time="0.001" /><testcase classname="unit.test_tools.TestCodeExecution" name="test_format_code" time="0.001" /><testcase classname="unit.test_tools.TestCodeExecution" name="test_check_type" time="0.001" /><testcase classname="unit.test_tools.TestCodeExecution" name="test_start_server_background" time="0.001" /><testcase classname="unit.test_tools.TestWebTools" name="test_web_search" time="0.000" /><testcase classname="unit.test_tools.TestWebTools" name="test_web_fetch" time="0.001" /><testcase classname="unit.test_tools.TestWebTools" name="test_check_url" time="0.003" /><testcase classname="unit.test_tools.TestMemoryTools" name="test_memory_recall" time="0.002" /><testcase classname="unit.test_tools.TestMemoryTools" name="test_memory_save" time="0.001" /><testcase classname="unit.test_tools.TestMemoryTools" name="test_memory_list" time="0.001" /><testcase classname="unit.test_tools.TestMemoryTools" name="test_context_load" time="0.001" /><testcase classname="unit.test_tools.TestMemoryTools" name="test_project_scan" time="0.002" /><testcase classname="unit.test_tools.TestTaskPlanningTools" name="test_create_task" time="0.001" /><testcase classname="unit.test_tools.TestTaskPlanningTools" name="test_list_tasks" time="0.000" /><testcase classname="unit.test_tools.TestTaskPlanningTools" name="test_update_task" time="0.001"><failure message="assert False is True">tests/unit/test_tools.py:512: in test_update_task
|
| 107 |
-
assert result["success"] is True
|
| 108 |
-
E assert False is True</failure></testcase><testcase classname="unit.test_tools.TestTaskPlanningTools" name="test_delete_task" time="0.001"><failure message="assert False is True">tests/unit/test_tools.py:522: in test_delete_task
|
| 109 |
-
assert result["success"] is True
|
| 110 |
-
E assert False is True</failure></testcase><testcase classname="unit.test_tools.TestTaskPlanningTools" name="test_create_plan" time="0.001" /><testcase classname="unit.test_tools.TestTaskPlanningTools" name="test_execute_plan" time="0.001"><failure message="assert False is True">tests/unit/test_tools.py:540: in test_execute_plan
|
| 111 |
-
assert result["success"] is True
|
| 112 |
-
E assert False is True</failure></testcase><testcase classname="unit.test_utils.TestCLIUtilities" name="test_print_colored" time="0.000" /><testcase classname="unit.test_utils.TestCLIUtilities" name="test_print_colored_with_bold" time="0.001" /><testcase classname="unit.test_utils.TestCLIUtilities" name="test_format_output_dict" time="0.000" /><testcase classname="unit.test_utils.TestCLIUtilities" name="test_format_output_list" time="0.000" /><testcase classname="unit.test_utils.TestCLIUtilities" name="test_format_output_json" time="0.001" /><testcase classname="unit.test_utils.TestCLIUtilities" name="test_format_output_string" time="0.000" /><testcase classname="unit.test_utils.TestAgentUtilities" name="test_intent_parsing_utility" time="0.000" /><testcase classname="unit.test_utils.TestAgentUtilities" name="test_tool_selection_utility" time="0.000" /><testcase classname="unit.test_utils.TestAgentUtilities" name="test_file_path_extraction" time="0.000" /><testcase classname="unit.test_utils.TestContextUtilities" name="test_session_summary_format" time="0.000" /><testcase classname="unit.test_utils.TestContextUtilities" name="test_context_summary_format" time="0.001" /><testcase classname="unit.test_utils.TestToolSchemas" name="test_schemas_contain_required_fields" time="0.002" /><testcase classname="unit.test_utils.TestToolSchemas" name="test_schemas_parameters_structure" time="0.000" /><testcase classname="unit.test_utils.TestDateTimeUtilities" name="test_timestamp_format" time="0.000" /><testcase classname="unit.test_utils.TestPathUtilities" name="test_path_resolution" time="0.000" /><testcase classname="unit.test_utils.TestPathUtilities" name="test_path_parent" time="0.000" /><testcase classname="unit.test_utils.TestPathUtilities" name="test_path_name" time="0.000" /></testsuite></testsuites>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
test.py
DELETED
|
@@ -1 +0,0 @@
|
|
| 1 |
-
new_content
|
|
|
|
|
|
test_imports.py
DELETED
|
@@ -1,51 +0,0 @@
|
|
| 1 |
-
#!/usr/bin/env python3
|
| 2 |
-
"""Test Stack 2.9 CLI imports and basic functionality."""
|
| 3 |
-
|
| 4 |
-
import sys
|
| 5 |
-
from pathlib import Path
|
| 6 |
-
|
| 7 |
-
# Ensure we can import from stack_cli
|
| 8 |
-
stack_cli_path = Path(__file__).parent / "stack_cli"
|
| 9 |
-
if str(stack_cli_path) not in sys.path:
|
| 10 |
-
sys.path.insert(0, str(stack_cli_path))
|
| 11 |
-
|
| 12 |
-
print("Testing Stack 2.9 CLI and Agent Interface...")
|
| 13 |
-
print("="*60)
|
| 14 |
-
|
| 15 |
-
try:
|
| 16 |
-
from stack_cli import tools, agent, context
|
| 17 |
-
print("✓ All modules import successfully")
|
| 18 |
-
except ImportError as e:
|
| 19 |
-
print(f"✗ Import error: {e}")
|
| 20 |
-
sys.exit(1)
|
| 21 |
-
|
| 22 |
-
try:
|
| 23 |
-
tools_list = tools.list_tools()
|
| 24 |
-
print(f"✓ Tools available: {len(tools_list)}")
|
| 25 |
-
except Exception as e:
|
| 26 |
-
print(f"✗ Tools list error: {e}")
|
| 27 |
-
|
| 28 |
-
try:
|
| 29 |
-
agent_instance = agent.create_agent()
|
| 30 |
-
print("✓ Agent created successfully")
|
| 31 |
-
except Exception as e:
|
| 32 |
-
print(f"✗ Agent creation error: {e}")
|
| 33 |
-
|
| 34 |
-
try:
|
| 35 |
-
ctx_mgr = context.create_context_manager()
|
| 36 |
-
print("✓ Context manager created")
|
| 37 |
-
except Exception as e:
|
| 38 |
-
print(f"✗ Context manager error: {e}")
|
| 39 |
-
|
| 40 |
-
try:
|
| 41 |
-
response = agent_instance.process("list my tasks")
|
| 42 |
-
print(f"✓ Agent responds: {response.content[:50]}...")
|
| 43 |
-
except Exception as e:
|
| 44 |
-
print(f"✗ Agent response error: {e}")
|
| 45 |
-
|
| 46 |
-
print("="*60)
|
| 47 |
-
print("Stack 2.9 CLI and Agent Interface is ready!")
|
| 48 |
-
print("\nTo run:")
|
| 49 |
-
print(" python stack.py # Interactive chat")
|
| 50 |
-
print(" python stack.py -c \"...\" # Single command")
|
| 51 |
-
print(" python demo_stack.py # Run demo")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
training-data/manifest.json
CHANGED
|
@@ -1,18 +1,18 @@
|
|
| 1 |
{
|
| 2 |
"dataset": {
|
| 3 |
"name": "Stack 2.9 Training Data",
|
| 4 |
-
"version": "0.
|
| 5 |
"description": "Training data for Stack 2.9, an open-source coding assistant based on Qwen2.5-Coder",
|
| 6 |
-
"source": "OpenClaw architecture + synthetic examples + code analysis",
|
| 7 |
"license": "Apache 2.0"
|
| 8 |
},
|
| 9 |
"stats": {
|
| 10 |
-
"toolSchemas":
|
| 11 |
-
"syntheticExamples":
|
| 12 |
"codeCommentPairs": 4045,
|
| 13 |
"testExamples": 0,
|
| 14 |
"conversations": 0,
|
| 15 |
-
"totalExamples":
|
| 16 |
},
|
| 17 |
"model_config": {
|
| 18 |
"base_model": "Qwen2.5-Coder-32B",
|
|
@@ -39,11 +39,12 @@
|
|
| 39 |
"eos_token": "<|endoftext|>"
|
| 40 |
},
|
| 41 |
"training_data": {
|
| 42 |
-
"synthetic_examples": "
|
| 43 |
-
"tools_catalog": "
|
| 44 |
-
"code_pairs": "
|
| 45 |
-
"test_examples": "
|
| 46 |
-
"conversations": "
|
|
|
|
| 47 |
"estimated_tokens": "~50M tokens total",
|
| 48 |
"recommended_dataset_size": "100K - 1M examples"
|
| 49 |
},
|
|
|
|
| 1 |
{
|
| 2 |
"dataset": {
|
| 3 |
"name": "Stack 2.9 Training Data",
|
| 4 |
+
"version": "0.3.0",
|
| 5 |
"description": "Training data for Stack 2.9, an open-source coding assistant based on Qwen2.5-Coder",
|
| 6 |
+
"source": "OpenClaw architecture + synthetic examples + code analysis + RTMP extraction",
|
| 7 |
"license": "Apache 2.0"
|
| 8 |
},
|
| 9 |
"stats": {
|
| 10 |
+
"toolSchemas": 46,
|
| 11 |
+
"syntheticExamples": 228,
|
| 12 |
"codeCommentPairs": 4045,
|
| 13 |
"testExamples": 0,
|
| 14 |
"conversations": 0,
|
| 15 |
+
"totalExamples": 228
|
| 16 |
},
|
| 17 |
"model_config": {
|
| 18 |
"base_model": "Qwen2.5-Coder-32B",
|
|
|
|
| 39 |
"eos_token": "<|endoftext|>"
|
| 40 |
},
|
| 41 |
"training_data": {
|
| 42 |
+
"synthetic_examples": "./synthetic/examples.jsonl",
|
| 43 |
+
"tools_catalog": "./tools/catalog.json",
|
| 44 |
+
"code_pairs": "./code-pairs/pairs.json",
|
| 45 |
+
"test_examples": "./code-pairs/test-examples.json",
|
| 46 |
+
"conversations": "./conversations/parsed.json",
|
| 47 |
+
"src_derived": "./src-derived/rtmp_examples.jsonl",
|
| 48 |
"estimated_tokens": "~50M tokens total",
|
| 49 |
"recommended_dataset_size": "100K - 1M examples"
|
| 50 |
},
|
training-data/src-derived/rtmp_examples.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:72a74492eccf0c14d8918545c46c9dd5d8c9d27af15ecea013ffe4501ef16b22
|
| 3 |
+
size 7133
|
training-data/tools/catalog.json
CHANGED
|
@@ -257,5 +257,106 @@
|
|
| 257 |
"hasPrompt": true,
|
| 258 |
"hasImplementation": false,
|
| 259 |
"inputSchema": {}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 260 |
}
|
| 261 |
]
|
|
|
|
| 257 |
"hasPrompt": true,
|
| 258 |
"hasImplementation": false,
|
| 259 |
"inputSchema": {}
|
| 260 |
+
},
|
| 261 |
+
{
|
| 262 |
+
"tool": "VoiceRecordingTool",
|
| 263 |
+
"description": "Record voice input from the microphone for voice commands. Captures audio and converts to base64 for processing. Use for hands-free coding commands.",
|
| 264 |
+
"hasPrompt": true,
|
| 265 |
+
"hasImplementation": true,
|
| 266 |
+
"inputSchema": {
|
| 267 |
+
"type": "object",
|
| 268 |
+
"properties": {
|
| 269 |
+
"maxDuration": {
|
| 270 |
+
"type": "number",
|
| 271 |
+
"description": "Maximum recording duration in seconds"
|
| 272 |
+
}
|
| 273 |
+
}
|
| 274 |
+
}
|
| 275 |
+
},
|
| 276 |
+
{
|
| 277 |
+
"tool": "VoiceSynthesisTool",
|
| 278 |
+
"description": "Convert text to speech using a cloned voice. Takes text input and synthesizes spoken audio output. Use to speak responses aloud.",
|
| 279 |
+
"hasPrompt": true,
|
| 280 |
+
"hasImplementation": true,
|
| 281 |
+
"inputSchema": {
|
| 282 |
+
"type": "object",
|
| 283 |
+
"properties": {
|
| 284 |
+
"text": {
|
| 285 |
+
"type": "string",
|
| 286 |
+
"description": "Text to synthesize to speech"
|
| 287 |
+
},
|
| 288 |
+
"voiceName": {
|
| 289 |
+
"type": "string",
|
| 290 |
+
"description": "Name of the voice model to use"
|
| 291 |
+
}
|
| 292 |
+
},
|
| 293 |
+
"required": [
|
| 294 |
+
"text"
|
| 295 |
+
]
|
| 296 |
+
}
|
| 297 |
+
},
|
| 298 |
+
{
|
| 299 |
+
"tool": "VoiceCloneTool",
|
| 300 |
+
"description": "Clone a voice from audio samples for use in synthesis. Creates a custom voice model from reference audio. Use to create personalized voices.",
|
| 301 |
+
"hasPrompt": true,
|
| 302 |
+
"hasImplementation": true,
|
| 303 |
+
"inputSchema": {
|
| 304 |
+
"type": "object",
|
| 305 |
+
"properties": {
|
| 306 |
+
"voiceName": {
|
| 307 |
+
"type": "string",
|
| 308 |
+
"description": "Name for the cloned voice"
|
| 309 |
+
},
|
| 310 |
+
"audioPath": {
|
| 311 |
+
"type": "string",
|
| 312 |
+
"description": "Path to audio sample file"
|
| 313 |
+
}
|
| 314 |
+
},
|
| 315 |
+
"required": [
|
| 316 |
+
"voiceName"
|
| 317 |
+
]
|
| 318 |
+
}
|
| 319 |
+
},
|
| 320 |
+
{
|
| 321 |
+
"tool": "VoiceStatusTool",
|
| 322 |
+
"description": "Check voice service status and list available voices. Returns recording availability, API status, and available voice models.",
|
| 323 |
+
"hasPrompt": true,
|
| 324 |
+
"hasImplementation": true,
|
| 325 |
+
"inputSchema": {}
|
| 326 |
+
},
|
| 327 |
+
{
|
| 328 |
+
"tool": "McpAuthTool",
|
| 329 |
+
"description": "McpAuth tool",
|
| 330 |
+
"hasPrompt": false,
|
| 331 |
+
"hasImplementation": true,
|
| 332 |
+
"inputSchema": {}
|
| 333 |
+
},
|
| 334 |
+
{
|
| 335 |
+
"tool": "REPLTool",
|
| 336 |
+
"description": "REPL tool",
|
| 337 |
+
"hasPrompt": false,
|
| 338 |
+
"hasImplementation": true,
|
| 339 |
+
"inputSchema": {}
|
| 340 |
+
},
|
| 341 |
+
{
|
| 342 |
+
"tool": "SyntheticOutputTool",
|
| 343 |
+
"description": "SyntheticOutput tool",
|
| 344 |
+
"hasPrompt": false,
|
| 345 |
+
"hasImplementation": true,
|
| 346 |
+
"inputSchema": {}
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"tool": "shared",
|
| 350 |
+
"description": "shared tool",
|
| 351 |
+
"hasPrompt": false,
|
| 352 |
+
"hasImplementation": true,
|
| 353 |
+
"inputSchema": {}
|
| 354 |
+
},
|
| 355 |
+
{
|
| 356 |
+
"tool": "testing",
|
| 357 |
+
"description": "testing tool",
|
| 358 |
+
"hasPrompt": false,
|
| 359 |
+
"hasImplementation": true,
|
| 360 |
+
"inputSchema": {}
|
| 361 |
}
|
| 362 |
]
|
tsconfig.json
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"compilerOptions": {
|
| 3 |
+
"target": "ES2022",
|
| 4 |
+
"module": "ESNext",
|
| 5 |
+
"moduleResolution": "bundler",
|
| 6 |
+
"lib": ["ES2022"],
|
| 7 |
+
"outDir": "./dist",
|
| 8 |
+
"rootDir": "./src",
|
| 9 |
+
"strict": true,
|
| 10 |
+
"esModuleInterop": true,
|
| 11 |
+
"skipLibCheck": true,
|
| 12 |
+
"forceConsistentCasingInFileNames": true,
|
| 13 |
+
"resolveJsonModule": true,
|
| 14 |
+
"declaration": true,
|
| 15 |
+
"declarationMap": true,
|
| 16 |
+
"sourceMap": true,
|
| 17 |
+
"allowImportingTsExtensions": true,
|
| 18 |
+
"noEmit": true
|
| 19 |
+
},
|
| 20 |
+
"include": ["src/**/*"],
|
| 21 |
+
"exclude": ["node_modules", "dist"]
|
| 22 |
+
}
|