import gradio as gr
from sentence_transformers import SentenceTransformer, util
import openai
import os

# Silence HuggingFace tokenizer fork warnings/deadlocks when the web app
# spawns worker processes.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Knowledge-base text file and the fine-tuned retrieval model directory.
filename = "output_topic_details.txt"
retrieval_model_name = 'output/sentence-transformer-finetuned/'

# Requires OPENAI_API_KEY in the environment; raises KeyError if missing.
openai.api_key = os.environ["OPENAI_API_KEY"]

# System prompt for the chat model (typo "reccomendation" fixed).
system_message = "You are a movie recommendation chatbot created to hold a spotlight on women in the film industry"

# Running conversation history sent to the chat API on every request.
messages = [{"role": "system", "content": system_message}]

# Load the sentence-transformer retrieval model.  Pre-bind to None so a
# failed load leaves a defined (and easily testable) value instead of
# producing a NameError at first use.
retrieval_model = None
try:
    retrieval_model = SentenceTransformer(retrieval_model_name)
    print("Models loaded successfully.")
except Exception as e:
    print(f"Failed to load models: {e}")
|
def load_and_preprocess_text(filename):
    """
    Load and preprocess text from a file.

    Reads *filename* as UTF-8, drops empty/whitespace-only lines, and strips
    surrounding whitespace from the rest.

    Parameters:
        filename: path of the text file to read.

    Returns:
        list[str]: the cleaned non-empty lines, or [] when the file cannot
        be read (missing file, permission error, bad encoding).
    """
    try:
        with open(filename, 'r', encoding='utf-8') as file:
            segments = [line.strip() for line in file if line.strip()]
    except (OSError, UnicodeError) as e:
        # Best-effort: report the problem and fall back to an empty corpus
        # so the app can still start.
        print(f"Failed to load or preprocess text: {e}")
        return []
    print("Text loaded and preprocessed successfully.")
    return segments
| |
|
# Pre-load the knowledge base once at startup; [] if the file is unreadable.
segments = load_and_preprocess_text(filename)
| |
|
def find_relevant_segment(user_query, segments):
    """
    Find the most relevant text segment for a user's query.

    Embeds the lower-cased query and every candidate segment with the
    retrieval model, scores them by cosine similarity, and returns the
    best-matching segment.

    Parameters:
        user_query: the user's question text.
        segments: list of candidate text segments.

    Returns:
        str: the best-matching segment, or "" when *segments* is empty or
        an error occurs during encoding/scoring.
    """
    # Guard: encoding an empty corpus (or argmax over it) would error out.
    if not segments:
        return ""
    try:
        lower_query = user_query.lower()

        query_embedding = retrieval_model.encode(lower_query)
        segment_embeddings = retrieval_model.encode(segments)

        similarities = util.pytorch_cos_sim(query_embedding, segment_embeddings)[0]

        # argmax() yields a 0-dim tensor; cast to a plain int for list indexing.
        best_idx = int(similarities.argmax())
        return segments[best_idx]
    except Exception as e:
        print(f"Error in finding relevant segment: {e}")
        return ""
| |
|
def generate_response(user_query, relevant_segment):
    """
    Generate a chat reply grounded in the retrieved movie information.

    Appends the retrieved segment to the running conversation history as a
    user message, calls the OpenAI chat API with the full history, records
    the assistant reply back into the history, and returns the reply text.

    Parameters:
        user_query: the user's question (currently unused; the retrieved
            segment alone is sent as the user turn).
        relevant_segment: text segment retrieved for this query.

    Returns:
        str: the assistant reply, or an error description on failure.
    """
    try:
        user_message = f"Here's the information on movies: {relevant_segment}"

        # Mutates module-level `messages` so later turns keep the context.
        messages.append({"role": "user", "content": user_message})

        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=150,
            temperature=0.2,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )

        output_text = response['choices'][0]['message']['content'].strip()

        # Record the assistant turn as well, for multi-turn coherence.
        messages.append({"role": "assistant", "content": output_text})

        return output_text

    except Exception as e:
        # Surface the failure to the UI rather than crashing the app.
        print(f"Error in generating response: {e}")
        return f"Error in generating response: {e}"
| |
|
def query_model(question):
    """
    Process a question, find relevant information, and generate a response.

    Parameters:
        question: the user's question; empty or whitespace-only input
            yields the welcome message.

    Returns:
        str: the chatbot's reply, a fallback message when no relevant
        segment is found, or the welcome message for blank input.
    """
    # Treat whitespace-only input the same as an empty question.
    if not question or not question.strip():
        return "Welcome to SheScreen! Ask me anything about female-centered films and women in the film industry"
    relevant_segment = find_relevant_segment(question, segments)
    if not relevant_segment:
        return "Could not find specific information. Please refine your question."
    response = generate_response(question, relevant_segment)
    return response
| |
|
| | |
| | welcome_message = """ |
| | # ♟️ Welcome to SheScreen! |
| | |
| | ## Your AI-driven assistant for all film-related queries. Created by Erin Enriquez, Fiona Beauchamp, and Tamara Landman of the 2024 Kode With Klossy CITY Camp. |
| | """ |
| |
|
| | topics = """ |
| | |
| | # Setup the Gradio Blocks interface with custom layout components |
| | with gr.Blocks(theme='JohnSmith9982/small_and_pretty') as demo: |
| | gr.Markdown(welcome_message) # Display the formatted welcome message |
| | with gr.Row(): |
| | with gr.Column(): |
| | gr.Markdown(topics) # Show the topics on the left side |
| | with gr.Row(): |
| | with gr.Column(): |
| | question = gr.Textbox(label="Your question", placeholder="What do you want to ask about?") |
| | answer = gr.Textbox(label="SheScreen Response", placeholder="SheScreen will respond here...", interactive=False, lines=10) |
| | submit_button = gr.Button("Submit") |
| | submit_button.click(fn=query_model, inputs=question, outputs=answer) |
| | |
| | |
| | # Launch the Gradio app to allow user interaction |
| | demo.launch(share=True) |
| | |