import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np

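# Hyperparameters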
# The demo corpus below is tiny (well under 100 characters), so the sequence
# length and batch size are kept small enough for create_batches() to yield
# at least one batch per epoch.
sequence_length = 25
batch_size = 2
hidden_size = 128
num_layers = 1
learning_rate = 0.01
num_epochs = 100
device = torch.device('cpu')

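# Toy corpus and character-level vocabulary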
text = "Hello, this is a simple language model for text generation using PyTorch."
chars = sorted(list(set(text)))
vocab_size = len(chars)
char_to_idx = {ch: i for i, ch in enumerate(chars)}
idx_to_char = {i: ch for i, ch in enumerate(chars)}

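# Encode the corpus as a tensor of character indices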
data = [char_to_idx[ch] for ch in text]
data = torch.tensor(data, dtype=torch.long).to(device)

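# Split the encoded corpus into (input, target) mini-batches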
def create_batches(data, batch_size, sequence_length):
    # Reserve one extra character at the end so every input chunk has a
    # target chunk of the same length, shifted one position to the right.
    num_batches = (len(data) - 1) // (batch_size * sequence_length)
    x_data = data[:num_batches * batch_size * sequence_length].view(batch_size, -1)
    y_data = data[1:num_batches * batch_size * sequence_length + 1].view(batch_size, -1)
    for i in range(0, x_data.size(1), sequence_length):
        x = x_data[:, i:i + sequence_length]
        y = y_data[:, i:i + sequence_length]
        yield x, y

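# Character-level LSTM language model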
class CharRNN(nn.Module):
    def __init__(self, vocab_size, hidden_size, num_layers):
        super(CharRNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.embedding = nn.Embedding(vocab_size, hidden_size)
        self.lstm = nn.LSTM(hidden_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, x, hidden):
        x = self.embedding(x)               # (batch, seq) -> (batch, seq, hidden)
        out, hidden = self.lstm(x, hidden)  # (batch, seq, hidden)
        out = self.fc(out)                  # (batch, seq, vocab)
        return out, hidden

    def init_hidden(self, batch_size):
        # Zero-initialized (h, c) pair for the LSTM.
        return (torch.zeros(self.num_layers, batch_size, self.hidden_size).to(device),
                torch.zeros(self.num_layers, batch_size, self.hidden_size).to(device))

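# Model, loss function, and optimizer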
model = CharRNN(vocab_size, hidden_size, num_layers).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

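# Training loop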
for epoch in range(num_epochs):
    hidden = model.init_hidden(batch_size)
    for i, (x, y) in enumerate(create_batches(data, batch_size, sequence_length)):
        x, y = x.to(device), y.to(device)
        # Detach the hidden state so gradients do not propagate across batches.
        hidden = tuple(h.detach() for h in hidden)
        optimizer.zero_grad()
        output, hidden = model(x, hidden)
        # CrossEntropyLoss expects class scores in dim 1, i.e. (batch, vocab, seq).
        loss = criterion(output.transpose(1, 2), y)
        loss.backward()
        optimizer.step()

    # The tiny corpus yields only one batch per epoch, so report progress
    # every 10 epochs rather than every 10 steps.
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

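# Sample from the model one character at a time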
def generate_text(model, start_str, length=100):
    model.eval()
    indices = [char_to_idx[ch] for ch in start_str]
    hidden = model.init_hidden(1)
    with torch.no_grad():
        # Warm up the hidden state on all but the last character of the prompt.
        for idx in indices[:-1]:
            x = torch.tensor([idx], dtype=torch.long).unsqueeze(0).to(device)
            _, hidden = model(x, hidden)
        for _ in range(length):
            x = torch.tensor([indices[-1]], dtype=torch.long).unsqueeze(0).to(device)
            output, hidden = model(x, hidden)
            # Turn the logits into a probability distribution; cast to float64 and
            # renormalize so np.random.choice accepts it despite rounding error.
            prob = torch.softmax(output.squeeze(), dim=0).cpu().numpy().astype(np.float64)
            prob /= prob.sum()
            next_idx = int(np.random.choice(vocab_size, p=prob))
            indices.append(next_idx)
    return ''.join([idx_to_char[idx] for idx in indices])

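# Generate a continuation of the prompt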
start_str = "Hello"
generated_text = generate_text(model, start_str, length=200)
print(generated_text)