| |
| """ |
| Test script to verify the trained model works correctly. |
| """ |
|
|
| from transformers import AutoModelForCausalLM, AutoTokenizer |
| import torch |
| import time |
|
|
| def test_model_loading(): |
| """Test that the model loads successfully.""" |
| print("π§ͺ Testing model loading...") |
| |
| model_path = "/home/x/adaptai/experiments/qwen3-8b-elizabeth-simple/" |
| |
| |
| print("π₯ Loading tokenizer...") |
| tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) |
| print("β
Tokenizer loaded successfully!") |
| |
| |
| print("π₯ Loading model...") |
| start_time = time.time() |
| |
| model = AutoModelForCausalLM.from_pretrained( |
| model_path, |
| torch_dtype=torch.bfloat16, |
| device_map="auto", |
| trust_remote_code=True |
| ) |
| |
| load_time = time.time() - start_time |
| print(f"β
Model loaded successfully in {load_time:.2f} seconds!") |
| |
| |
| print(f"π Model device: {model.device}") |
| print(f"π Model dtype: {model.dtype}") |
| |
| return model, tokenizer |
|
|
| def test_inference(model, tokenizer): |
| """Test basic inference.""" |
| print("\nπ§ͺ Testing inference...") |
| |
| |
| prompt = "Hello, how are you today?" |
| |
| print(f"π Prompt: {prompt}") |
| |
| |
| inputs = tokenizer(prompt, return_tensors="pt").to(model.device) |
| print(f"π’ Input tokens: {inputs.input_ids.shape}") |
| |
| |
| start_time = time.time() |
| with torch.no_grad(): |
| outputs = model.generate( |
| **inputs, |
| max_new_tokens=50, |
| do_sample=True, |
| temperature=0.7, |
| pad_token_id=tokenizer.eos_token_id |
| ) |
| |
| gen_time = time.time() - start_time |
| |
| |
| response = tokenizer.decode(outputs[0], skip_special_tokens=True) |
| |
| print(f"β
Generation completed in {gen_time:.2f} seconds!") |
| print(f"π¬ Response: {response}") |
| |
| return response |
|
|
| def test_tool_use_capability(model, tokenizer): |
| """Test tool use capability.""" |
| print("\nπ§ͺ Testing tool use capability...") |
| |
| tool_prompt = """Please help me calculate the square root of 144 using the calculator tool. |
| |
| Available tools: |
| - calculator: Performs mathematical calculations |
| |
| Please respond with the tool call format.""" |
| |
| print(f"π Tool prompt: {tool_prompt[:100]}...") |
| |
| inputs = tokenizer(tool_prompt, return_tensors="pt").to(model.device) |
| |
| with torch.no_grad(): |
| outputs = model.generate( |
| **inputs, |
| max_new_tokens=100, |
| do_sample=True, |
| temperature=0.3, |
| pad_token_id=tokenizer.eos_token_id |
| ) |
| |
| response = tokenizer.decode(outputs[0], skip_special_tokens=True) |
| |
| print(f"π¬ Tool use response: {response}") |
| |
| |
| if "calculator" in response.lower() or "tool" in response.lower(): |
| print("β
Tool use capability detected!") |
| return True |
| else: |
| print("β οΈ Tool use pattern not clearly detected") |
| return False |
|
|
| if __name__ == "__main__": |
| print("=" * 60) |
| print("π€ Qwen3-8B-Elizabeth-Simple Model Test") |
| print("=" * 60) |
| |
| try: |
| model, tokenizer = test_model_loading() |
| test_inference(model, tokenizer) |
| tool_use_detected = test_tool_use_capability(model, tokenizer) |
| |
| print("\n" + "=" * 60) |
| print("π Model Test Summary:") |
| print(f" β
Model loading: Successful") |
| print(f" β
Basic inference: Working") |
| print(f" β
Tool use capability: {'Detected' if tool_use_detected else 'Needs verification'}") |
| print(" π Model is ready for deployment!") |
| print("=" * 60) |
| |
| except Exception as e: |
| print(f"\nβ Test failed with error: {e}") |
| raise |