"""
Tiny-LLM CLI SFT Demo - Generate Shell Commands from Natural Language
This model was fine-tuned to translate natural language instructions to CLI commands.
"""
import gradio as gr
import torch
from huggingface_hub import hf_hub_download
from model import TinyLLM, MODEL_CONFIG
# Model configuration
MODEL_ID = "jonmabe/tiny-llm-cli-sft"
MODEL_FILENAME = "best_model.pt"
# Load tokenizer
try:
    from tokenizers import Tokenizer

    tokenizer_path = hf_hub_download(repo_id=MODEL_ID, filename="tokenizer.json")
    tokenizer = Tokenizer.from_file(tokenizer_path)
    print("Loaded tokenizer from model repo")
except Exception as e:
    print(f"Could not load tokenizer: {e}")
    tokenizer = None
# Load model
print("Downloading model...")
model_path = hf_hub_download(repo_id=MODEL_ID, filename=MODEL_FILENAME)
print(f"Model downloaded to {model_path}")
print("Loading model...")
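# weights_only=False: the checkpoint pickles a config dict alongside the tensors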
checkpoint = torch.load(model_path, map_location="cpu", weights_only=False)
# Get config from checkpoint if available
if "config" in checkpoint and isinstance(checkpoint["config"], dict):
    config = checkpoint["config"]
    if "model" in config:
        config = config["model"]
else:
    config = MODEL_CONFIG
# Initialize model
model = TinyLLM(config)
# Load weights
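# Accept both full training checkpoints and bare state dicts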
if "model_state_dict" in checkpoint:
    state_dict = checkpoint["model_state_dict"]
else:
    state_dict = checkpoint

missing, unexpected = model.load_state_dict(state_dict, strict=False)
if missing:
    print(f"Warning: Missing keys: {missing[:5]}...")
if unexpected:
    print(f"Warning: Unexpected keys: {unexpected[:5]}...")
# Move to device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
model.eval()
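# eval() disables dropout; randomness during generation comes only from sampling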
total_params = sum(p.numel() for p in model.parameters())
print(f"Model loaded on {device} with {total_params:,} parameters")
def clean_bpe_output(text: str) -> str:
    """Clean byte-level BPE artifacts from tokenizer output."""
    # Replace the BPE space marker with an actual space
    text = text.replace("Ġ", " ")
    # Replace the BPE newline marker with an actual newline
    text = text.replace("Ċ", "\n")
    # Collapse repeated spaces per line; keep newlines so the caller can
    # still truncate at the first line break
    text = "\n".join(" ".join(line.split()) for line in text.splitlines())
    return text.strip()
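
# e.g. clean_bpe_output("ĠlsĠ-la") -> "ls -la" (illustrative; markers depend on the BPE tokenizer)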
def generate_command(
    instruction: str,
    max_tokens: int = 50,
    temperature: float = 0.7,
    top_p: float = 0.9,
    top_k: int = 50,
) -> str:
    """Generate a CLI command from an instruction."""
    if not instruction.strip():
        return "Please enter an instruction."
    if tokenizer is None:
        return "Tokenizer not available."

    # Format prompt
    prompt = f"Instruction: {instruction}\nCommand:"

    # Tokenize
    encoded = tokenizer.encode(prompt)
    input_ids = torch.tensor([encoded.ids], dtype=torch.long).to(device)
    input_len = input_ids.shape[1]

    # Generate
    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            eos_token_id=tokenizer.token_to_id("</s>"),
        )

    # Decode only the generated tokens
    generated_ids = output_ids[0, input_len:].tolist()
    raw_output = tokenizer.decode(generated_ids)

    # Clean BPE artifacts
    command = clean_bpe_output(raw_output)

    # Extract just the command (first line, stop at newline)
    command = command.split("\n")[0].strip()
    return command
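
# Quick smoke test (hypothetical output; the real text depends on the trained checkpoint):
#   generate_command("Show disk usage")  # -> e.g. "df -h"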
# Example instructions
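# One single-element list per example: gr.Examples pairs each inner list with its `inputs`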
EXAMPLES = [
    ["List all files in the current directory"],
    ["Find all Python files"],
    ["Show disk usage"],
    ["Create a new folder called test"],
    ["Search for 'error' in log files"],
    ["Show the last 10 lines of a file"],
    ["Count lines in a file"],
    ["Copy files to another directory"],
    ["Show running processes"],
    ["Check available disk space"],
]
# Create Gradio interface
with gr.Blocks(title="CLI Command Generator") as demo:
    gr.Markdown("""
    # 🖥️ CLI Command Generator

    Translate natural language instructions to shell commands using a **54M parameter** language model.

    ⚠️ **Note**: This is an early-stage SFT model. Outputs may be incomplete or incorrect.

    ### How to Use
    1. Enter a natural language instruction
    2. Click "Generate" or press Enter
    3. The model will suggest a shell command
    """)
    with gr.Row():
        with gr.Column(scale=2):
            instruction_input = gr.Textbox(
                label="Instruction",
                placeholder="Describe what you want to do...",
                lines=2,
                value="List all files in the current directory",
            )
            with gr.Row():
                with gr.Column():
                    max_tokens = gr.Slider(
                        minimum=10,
                        maximum=100,
                        value=50,
                        step=5,
                        label="Max Tokens",
                    )
                    temperature = gr.Slider(
                        minimum=0.1,
                        maximum=1.5,
                        value=0.7,
                        step=0.1,
                        label="Temperature",
                        info="Higher = more creative",
                    )
                with gr.Column():
                    top_p = gr.Slider(
                        minimum=0.1,
                        maximum=1.0,
                        value=0.9,
                        step=0.05,
                        label="Top-p",
                    )
                    top_k = gr.Slider(
                        minimum=1,
                        maximum=100,
                        value=50,
                        step=5,
                        label="Top-k",
                    )
            generate_btn = gr.Button("⚡ Generate Command", variant="primary", size="lg")
        with gr.Column(scale=2):
            output_command = gr.Textbox(
                label="Generated Command",
                lines=3,
                interactive=False,
            )
            gr.Markdown("""
            ### Common Commands Reference
            - `ls` - list files
            - `find` - search for files
            - `grep` - search in files
            - `df` - free disk space by filesystem
            - `du` - disk usage of directories
            - `tar` - archive files
            - `scp` - copy over SSH
            """)
    gr.Markdown("### 📝 Example Instructions")
    gr.Examples(
        examples=EXAMPLES,
        inputs=instruction_input,
    )
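    # Clicking an example only fills the instruction box; generation still needs a click/Enter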
    # Event handlers
    generate_btn.click(
        fn=generate_command,
        inputs=[instruction_input, max_tokens, temperature, top_p, top_k],
        outputs=output_command,
    )
    instruction_input.submit(
        fn=generate_command,
        inputs=[instruction_input, max_tokens, temperature, top_p, top_k],
        outputs=output_command,
    )
    gr.Markdown("""
    ---
    ### About This Model

    **Model**: [jonmabe/tiny-llm-cli-sft](https://huggingface.co/jonmabe/tiny-llm-cli-sft)

    This is a Supervised Fine-Tuned (SFT) version of [tiny-llm-54m](https://huggingface.co/jonmabe/tiny-llm-54m),
    trained on ~13,000 natural language → CLI command pairs.

    #### Known Limitations
    - 🔬 **Experimental**: Outputs may be incomplete or incorrect
    - 📊 **Small model**: 54M parameters limits capability
    - 🔧 **Needs improvement**: More training data and steps needed

    #### Training Details
    - **Steps**: 2,000
    - **Best Val Loss**: 1.2456
    - **Data**: Geddy's NL2Bash + NL2Bash benchmark + synthetic
    - **Hardware**: RTX 5090, ~9 minutes
    """)
if __name__ == "__main__":
    demo.launch()