from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

# Load the mock shell-command dataset (expects JSON records with a 'prompt' field)
dataset = load_dataset('json', data_files='path_to_your/shell_commands_mock_data.json')

# Load the base model and tokenizer.
# Assumption: the original "Repl.it/llama-2-13b" is not a valid Hub repository;
# meta-llama/Llama-2-13b-hf is the official (gated) Llama-2-13B checkpoint --
# swap in any causal LM you have access to.
model_name = "meta-llama/Llama-2-13b-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token  # Llama tokenizers ship without a pad token
model = AutoModelForCausalLM.from_pretrained(model_name)

# Tokenize the prompts; max_length=512 is an assumed cap (adjust to your data).
# Drop the original JSON columns so only model inputs reach the collator.
def tokenize_function(examples):
    return tokenizer(examples['prompt'], padding="max_length", truncation=True, max_length=512)

tokenized_datasets = dataset.map(
    tokenize_function,
    batched=True,
    remove_columns=dataset['train'].column_names,
)
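
# Addition (not in the original): the Trainer needs labels to compute a causal-LM
# loss. DataCollatorForLanguageModeling with mlm=False copies input_ids into labels
# at batch time (masking pad positions to -100), so the tokenized prompts work as-is.
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)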

# Evaluate per epoch only if the dataset actually has a test split;
# otherwise Trainer raises an error at construction time.
has_eval = 'test' in tokenized_datasets

training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="epoch" if has_eval else "no",
    learning_rate=2e-5,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    num_train_epochs=3,
    weight_decay=0.01,
    logging_dir="./logs",
    logging_steps=10,
    save_steps=100,
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets['train'],
    eval_dataset=tokenized_datasets['test'] if has_eval else None,
    data_collator=data_collator,
)

# Fine-tune
trainer.train()

# Save the fine-tuned weights plus the tokenizer so the directory is self-contained
trainer.save_model("./fine_tuned_model")
tokenizer.save_pretrained("./fine_tuned_model")

# Final evaluation (skipped when there is no test split)
if has_eval:
    trainer.evaluate()
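
# --- Usage sketch (assumption: not part of the original script) ---
# Quick smoke test: reload the fine-tuned model and generate a shell command
# for a fresh prompt. The prompt text and max_new_tokens value are illustrative.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="./fine_tuned_model",
    tokenizer="./fine_tuned_model",
)
print(generator("List all files modified in the last 24 hours:", max_new_tokens=40)[0]["generated_text"])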