Spaces:
Runtime error
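A few things in this script can plausibly crash the Space at startup: with `batched=True`, `datasets.map` passes a dict of lists to `preprocess_function`, so iterating over `examples` yields column names (strings) rather than rows; the GPT-2 tokenizer behind DialoGPT has no pad token, which breaks batched training; and `evaluation_strategy="epoch"` raises an error when no eval dataset is given to the `Trainer`. The listing below addresses each of these points inline.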
from transformers import AutoModelForCausalLM, AutoTokenizer, DataCollatorForLanguageModeling, Trainer, TrainingArguments
from datasets import load_dataset
import gradio as gr
import torch
# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
# DialoGPT's GPT-2 tokenizer has no pad token, which breaks batched training;
# reuse the end-of-sequence token for padding
tokenizer.pad_token = tokenizer.eos_token
# Load your dataset
try:
    dataset = load_dataset('csv', data_files='alpaca.csv', delimiter='\t')  # the file is tab-delimited
    print("Dataset loaded successfully.")
    print("Available columns:", dataset['train'].column_names)  # print column names
except Exception as e:
    print(f"Error loading the dataset: {e}")
# Preprocess the data
def preprocess_function(examples):
    # With batched=True, `datasets.map` passes a dict of lists (column name
    # -> values), not a list of dicts; iterating over it would yield the
    # column names as strings, a likely source of the runtime error
    texts = [
        instruction + tokenizer.eos_token + output + tokenizer.eos_token
        for instruction, output in zip(examples["instruction"], examples["output"])
    ]
    # No labels are set here: the data collator below builds them from
    # input_ids and masks the padding for the causal-LM loss
    return tokenizer(texts, max_length=512, truncation=True)
# Map over the dataset, dropping the raw text columns
tokenized_dataset = dataset.map(
    preprocess_function,
    batched=True,
    remove_columns=dataset["train"].column_names,
)
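# Optional sanity check (hypothetical, safe to remove): confirm the tokenized
# split has the expected model fields before training
# print(tokenized_dataset["train"].column_names)  # expect input_ids, attention_mask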
# Set up the training arguments
training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="no",  # no eval dataset is passed to the Trainer
    learning_rate=2e-5,
    per_device_train_batch_size=2,
    num_train_epochs=3,
)
# Create the Trainer with a collator that pads each batch and builds the labels
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset['train'],
    data_collator=data_collator,
)
# Train the model
trainer.train()
# Save the trained model
model.save_pretrained("./mi_modelo_entrenado")
tokenizer.save_pretrained("./mi_modelo_entrenado")
# Reload the trained model
model = AutoModelForCausalLM.from_pretrained("./mi_modelo_entrenado")
tokenizer = AutoTokenizer.from_pretrained("./mi_modelo_entrenado")
# Initialize the conversation history
chat_history_ids = None
# Chat function
def chat_with_bot(user_input):
    global chat_history_ids
    try:
        new_user_input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')
        # Append the new turn to the running history; note the history grows
        # every turn, so generation stalls once it reaches max_length
        bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if chat_history_ids is not None else new_user_input_ids
        chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
        response = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
        # If the response is empty, fall back to a default answer
        if not response.strip():
            return "Sorry, I don't understand the question."
        return response
    except Exception as e:
        return f"Error: {e}. I couldn't process your question."
# Create the Gradio interface
iface = gr.Interface(fn=chat_with_bot, inputs="text", outputs="text", title="Trained Chatbot")
iface.launch()
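If the Space still fails before the app starts, check that every imported library is declared for the build. A minimal `requirements.txt` sketch for this app (versions unpinned; on a Gradio Space the gradio package itself typically comes from the SDK setting, and recent transformers releases also expect accelerate when using the Trainer):

transformers
datasets
torch
accelerate
gradio

Also note that fine-tuning DialoGPT-large inside the Space process at startup is slow and memory-hungry on free CPU hardware; training offline and uploading the saved "./mi_modelo_entrenado" folder is a safer setup.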