from datasets import Dataset
from transformers import (
    DataCollatorForLanguageModeling,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    Trainer,
    TrainingArguments,
)
|
|
|
|
|
def load_dataset(file_path):
    """Return the full UTF-8 text of *file_path* as a one-element list.

    The single string is the raw file contents; downstream code wraps it
    in a ``datasets.Dataset`` for training.
    """
    with open(file_path, "r", encoding="utf-8") as source:
        return [source.read()]
|
|
|
|
|
|
|
# --- Model & tokenizer -------------------------------------------------
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
# GPT-2 ships without a pad token; reuse EOS so padded batching works.
tokenizer.pad_token = tokenizer.eos_token

model = GPT2LMHeadModel.from_pretrained("gpt2")

# --- Data --------------------------------------------------------------
# NOTE(review): the original script imported Trainer/TrainingArguments/
# DataCollatorForLanguageModeling and printed "Fine-tuning completed."
# but never trained — it simply re-saved the pretrained weights. The
# missing training pipeline is restored below.
texts = load_dataset("train.txt")  # TODO(review): confirm training-file path


def _tokenize(batch):
    # Truncate to a safe context length; the collator pads per batch.
    return tokenizer(batch["text"], truncation=True, max_length=512)


train_dataset = Dataset.from_dict({"text": texts}).map(
    _tokenize, batched=True, remove_columns=["text"]
)

# mlm=False -> causal-LM objective (next-token prediction), matching GPT-2.
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

training_args = TrainingArguments(
    output_dir="./finetuned_gpt2",
    overwrite_output_dir=True,
    num_train_epochs=3,
    per_device_train_batch_size=2,
    save_strategy="no",  # weights are saved explicitly below
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    data_collator=data_collator,
)
trainer.train()

# Persist the fine-tuned weights together with the (patched) tokenizer.
model.save_pretrained("./finetuned_gpt2")
tokenizer.save_pretrained("./finetuned_gpt2")

print("Fine-tuning completed.")
|
|
|