# Environment configuration — set BEFORE TensorFlow/Keras are imported below,
# otherwise these settings are ignored by the TF runtime.
import os
import sys
import io
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'       # force CPU-only inference
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'       # disable oneDNN optimizations
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'        # silence TF C++ logging (fatal only)
os.environ['GRADIO_HOT_RELOAD'] = 'false'       # no auto-reload in production
os.environ['WRAPT_DISABLE_EXTENSIONS'] = 'true'
os.environ['PYTHONWARNINGS'] = 'ignore'

import re
import nltk
import pickle
import string
import warnings
import numpy as np
import gradio as gr
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences

# Silence Python-level warnings as well (noise from TF/Keras/Gradio).
warnings.filterwarnings('ignore')
warnings.simplefilter('ignore')

# Inference configuration.
MAX_LEN = 100                                    # padded token-sequence length fed to the model
MODEL_PATH = "sentiment_analysis_best.keras"     # trained Keras model file
TOKENIZER_PATH = "tokenizer.pkl"                 # fitted tokenizer (pickled)

# Fetch NLTK stopwords quietly; preprocess() falls back to an empty stopword
# set if this download (or the nltk corpus lookup) fails.
nltk.download('stopwords', quiet=True)
|
| | |
def expand_contractions(text):
    """Expand common English contractions in *text* (e.g. "don't" -> "do not").

    Matching is case-insensitive; replacements are always lowercase, matching
    the original behavior (the caller lowercases the text first anyway).

    Args:
        text: Input string, possibly containing contractions.

    Returns:
        str: The text with every known contraction expanded.
    """
    contractions = {
        "i'm": "i am", "you're": "you are", "he's": "he is",
        "she's": "she is", "it's": "it is", "we're": "we are",
        "they're": "they are", "i've": "i have", "you've": "you have",
        "we've": "we have", "they've": "they have", "i'll": "i will",
        "you'll": "you will", "he'll": "he will", "she'll": "she will",
        "we'll": "we will", "they'll": "they will", "i'd": "i would",
        "you'd": "you would", "he'd": "he would", "she'd": "she would",
        "we'd": "we would", "they'd": "they would", "don't": "do not",
        "doesn't": "does not", "didn't": "did not", "can't": "cannot",
        "couldn't": "could not", "won't": "will not", "wouldn't": "would not",
        "shouldn't": "should not", "isn't": "is not", "aren't": "are not",
        "wasn't": "was not", "weren't": "were not", "hasn't": "has not",
        "haven't": "have not", "hadn't": "had not", "mightn't": "might not",
        "mustn't": "must not", "needn't": "need not", "shan't": "shall not"
    }
    # One compiled alternation replaces ~40 sequential re.sub passes over the
    # whole text. Keys are re.escape()d, sorted longest-first for safety, and
    # \b anchors keep whole-word matching identical to the original.
    pattern = re.compile(
        r"\b(" + "|".join(
            re.escape(key)
            for key in sorted(contractions, key=len, reverse=True)
        ) + r")\b",
        flags=re.IGNORECASE,
    )
    # Lowercase the matched text before lookup so "DON'T" maps like "don't".
    return pattern.sub(lambda m: contractions[m.group(0).lower()], text)
| |
|
| | |
def preprocess(text):
    """Normalize raw text for the sentiment model.

    Pipeline: lowercase -> expand contractions -> strip digits -> strip
    punctuation -> remove English stopwords (keeping negations and the
    "to be" forms, which carry sentiment signal).

    Args:
        text: Raw input string.

    Returns:
        str: Space-joined cleaned tokens (may be empty).
    """
    negations = {"not", "no", "nor", "never", "n't", "nobody", "nothing", "neither", "nowhere", "none"}
    important_words = {"am", "is", "are", "was", "were", "be", "been", "being"}

    try:
        from nltk.corpus import stopwords
        # Keep negations and "to be" forms out of the stopword set.
        stop_words = set(stopwords.words("english")) - negations - important_words
    except Exception:
        # Best-effort: if NLTK or its corpus is unavailable, skip stopword
        # removal entirely rather than crashing. (Was a bare `except:`, which
        # also swallowed SystemExit/KeyboardInterrupt.)
        stop_words = set()

    text = text.lower()
    text = expand_contractions(text)
    text = re.sub(r"\d+", "", text)  # drop digits
    text = text.translate(str.maketrans('', '', string.punctuation))
    # stop_words already excludes negations/important_words, so the single
    # membership check is equivalent to the original three-clause condition.
    words = [w for w in text.split() if w not in stop_words]

    return " ".join(words)
| |
|
| | |
def load_resources():
    """Load the trained Keras model and the fitted tokenizer from disk.

    Returns:
        tuple: (model, tokenizer) ready for inference.

    Raises:
        FileNotFoundError: If either MODEL_PATH or TOKENIZER_PATH is missing.
        Exception: Any other load failure is logged and re-raised.
    """
    try:
        model = load_model(MODEL_PATH)
        print(f"β Model loaded successfully from {MODEL_PATH}")

        # NOTE(review): pickle.load executes arbitrary code from the file —
        # tokenizer.pkl must come from a trusted source.
        with open(TOKENIZER_PATH, "rb") as f:
            tokenizer = pickle.load(f)
        print(f"β Tokenizer loaded successfully from {TOKENIZER_PATH}")

        return model, tokenizer
    except FileNotFoundError:
        # No placeholders in the first message, so no f-prefix needed.
        print("β Error: Model or Tokenizer file not found!")
        print(f" Make sure {MODEL_PATH} AND {TOKENIZER_PATH} are in the same directory.")
        raise  # bare raise preserves the original traceback (was `raise e`)
    except Exception as e:
        print(f"β Error loading resources: {e}")
        raise
| |
|
| | |
# Load once at import time; predict_sentiment() reads these module-level
# globals on every request. Fails fast (raises) if the files are missing.
model, tokenizer = load_resources()
| |
|
| | |
def predict_sentiment(text):
    """Classify *text* as Negative / Positive / Neutral.

    Args:
        text: Raw user input string.

    Returns:
        tuple(str, str, str): (sentiment label with emoji, confidence as a
        percentage string, markdown-formatted detailed analysis) — the three
        Gradio output components.
    """
    # Guard: empty/whitespace-only input gets a neutral placeholder response.
    if not text or not text.strip():
        return "β οΈ Neutral", "33.33%", "Please enter some text to analyze!"

    processed_text = preprocess(text)

    # Preprocessing may strip everything (e.g. input was only digits,
    # punctuation, or stopwords).
    if not processed_text.strip():
        return "β οΈ Neutral", "33.33%", "Text is empty after preprocessing. Try adding more words."

    # Tokenize and pad to the fixed length the model expects (MAX_LEN).
    seq = tokenizer.texts_to_sequences([processed_text])
    padded = pad_sequences(seq, maxlen=MAX_LEN, padding='post')

    pred = model.predict(padded, verbose=0)
    label_idx = np.argmax(pred, axis=1)[0]
    confidence = pred[0][label_idx]

    # Class order per the model card in the UI: 0=Negative, 1=Positive, 2=Neutral.
    labels = ["π Negative", "π Positive", "π Neutral"]
    sentiment = labels[label_idx]
    confidence_percentage = f"{confidence * 100:.2f}%"

    # Markdown block rendered in the "Detailed Analysis" output component.
    detailed_results = f"""
### π Detailed Analysis:

**Original Text:** {text}

**Processed Text:** {processed_text}

**Prediction Probabilities:**
- π Negative: {pred[0][0] * 100:.2f}%
- π Positive: {pred[0][1] * 100:.2f}%
- π Neutral: {pred[0][2] * 100:.2f}%

**Final Sentiment:** {sentiment}
**Confidence:** {confidence_percentage}
"""
    return sentiment, confidence_percentage, detailed_results
| |
|
| | |
def create_gradio_interface():
    """Create and configure Gradio interface.

    Builds a Blocks layout with a text input, analyze/clear buttons, three
    output components wired to predict_sentiment(), and clickable examples.

    Returns:
        gr.Blocks: The assembled (not yet launched) interface.
    """

    # Pre-filled example inputs shown under the form; each inner list is one
    # value for the single text input component.
    examples = [
        ["I'm so happy with my purchase! Highly recommended!"],
        ["I don't like this at all. Very disappointing."],
        ["I absolutely love this product! It's amazing!"],
        ["This is the worst experience I've ever had."],
        ["Fantastic! Best decision I ever made!"],
        ["I'm not sure how I feel about this."],
        ["It's okay, nothing special really."],
        ["Amazing work! Best I've ever seen!"],
        ["This is the worst experience ever"],
        ["This is terrible and I hate it"],
        ["It works fine, no complaints."],
        ["Not bad, but could be better."],
        ["He is no good boy"],
        ["I'm doing great"],
        ["I'm not normal"],
        ["Both of you"],
        ["I am fine"],
        ["I am good"],
        ["I'm okay"]
    ]

    with gr.Blocks(title="Sentiment Analysis") as interface:
        # Page header / usage instructions.
        gr.Markdown("""
# π Sentiment Analysis - AI Powered
### Analyze the sentiment of your text using Deep Learning (LSTM Model)

**Instructions:** Enter any text in English and the model will predict whether it's Positive, Negative, or Neutral.
""")

        with gr.Row():
            # Left column: input textbox plus action buttons.
            with gr.Column(scale=1):
                text_input = gr.Textbox(
                    label="π Enter Your Text",
                    placeholder="Type your text here... (e.g., 'I love this product!')",
                    lines=5,
                    max_lines=10
                )

                with gr.Row():
                    analyze_btn = gr.Button("π Analyze Sentiment", variant="primary", size="lg")
                    clear_btn = gr.ClearButton([text_input], value="ποΈ Clear", size="lg")

            # Right column: read-only prediction outputs.
            with gr.Column(scale=1):
                sentiment_output = gr.Textbox(
                    label="π― Predicted Sentiment",
                    interactive=False
                )
                confidence_output = gr.Textbox(
                    label="π Confidence Score",
                    interactive=False
                )

        # Full-width markdown panel for the detailed breakdown.
        detailed_output = gr.Markdown(
            label="π Detailed Analysis",
            value="Results will appear here after analysis..."
        )

        gr.Markdown("### π‘ Try These Examples:")
        # cache_examples=False: predictions run live when an example is clicked.
        gr.Examples(
            examples=examples,
            inputs=text_input,
            outputs=[sentiment_output, confidence_output, detailed_output],
            fn=predict_sentiment,
            cache_examples=False
        )

        # Footer: model card and usage tips.
        gr.Markdown("""
---
**Model Information:**
- Architecture: Bidirectional LSTM with Embedding Layer
- Classes: Negative (0), Positive (1), Neutral (2)
- Max Sequence Length: 100 tokens

**Tips for Best Results:**
- Use clear, complete sentences
- The model works best with English text
- Longer texts provide more context for accurate predictions
""")

        # Button click and textbox Enter both trigger the same prediction.
        analyze_btn.click(
            fn=predict_sentiment,
            inputs=text_input,
            outputs=[sentiment_output, confidence_output, detailed_output]
        )

        text_input.submit(
            fn=predict_sentiment,
            inputs=text_input,
            outputs=[sentiment_output, confidence_output, detailed_output]
        )

    return interface
| |
|
| | |
class SuppressStderr:
    """Context manager that temporarily redirects sys.stderr to an in-memory
    buffer, silencing anything written to stderr inside the ``with`` block.

    NOTE(review): ``contextlib.redirect_stderr(io.StringIO())`` provides the
    same behavior; this class is kept for its existing public name.
    """

    def __enter__(self):
        # Save the real stream so it can be restored on exit.
        self.original_stderr = sys.stderr
        sys.stderr = io.StringIO()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore stderr even if the body raised, then close the scratch
        # buffer so its contents are released immediately (the original left
        # it open for the GC). Returning None lets exceptions propagate.
        buffer = sys.stderr
        sys.stderr = self.original_stderr
        buffer.close()
| |
|
| | |
if __name__ == "__main__":
    print("\n" + "=" * 70)
    print("π Starting Sentiment Analysis Gradio Interface...")
    print("=" * 70 + "\n")

    interface = create_gradio_interface()

    print("β³ Launching server...")

    # launch(..., prevent_thread_lock=False) BLOCKS until the server shuts
    # down, so the status banner is printed before launching — previously it
    # only appeared after Ctrl+C, which was misleading.
    print("\n" + "=" * 70)
    print("β Interface is LIVE and ready to use!")
    print("   π Local URL: http://localhost:7860")
    print("   β‘ Server is running smoothly")
    print("   π Press Ctrl+C to stop")
    print("=" * 70)

    with SuppressStderr():
        # NOTE(review): `theme` is a gr.Blocks() constructor argument, not a
        # launch() kwarg — passing it to launch() raises TypeError on current
        # Gradio. Set theme=gr.themes.Soft() inside create_gradio_interface()
        # if the themed look is wanted.
        interface.launch(
            server_name="0.0.0.0",   # listen on all interfaces
            server_port=7860,
            share=False,             # no public Gradio share link
            show_error=False,
            ssr_mode=False,
            quiet=False,
            prevent_thread_lock=False  # block here until the server stops
        )