#!/usr/bin/env python3
"""
Production Installation Script for AI Text Humanizer
Ensures all advanced features are properly installed and working
"""
import importlib
import subprocess
import sys
import os
import time


def run_command(cmd, description, critical=True):
    """Run a shell command and report progress on stdout.

    Args:
        cmd: Command line to execute. Runs with ``shell=True``; every
            command in this script is hard-coded, never user-supplied.
        description: Human-readable label printed before/after the run.
        critical: When True a failure returns False so callers can abort;
            when False the failure is reported but True is still returned.

    Returns:
        True on success (or tolerated non-critical failure), False on a
        critical failure.
    """
    print(f"๐Ÿ”„ {description}...")
    try:
        result = subprocess.run(cmd, shell=True, check=True,
                                capture_output=True, text=True)
        print(f"โœ… {description} - SUCCESS")
        if result.stdout.strip():
            print(f" Output: {result.stdout.strip()}")
        return True
    except subprocess.CalledProcessError as e:
        print(f"โŒ {description} - FAILED")
        print(f" Error: {e.stderr.strip()}")
        if critical:
            return False
        return True


def check_gpu_availability():
    """Check if CUDA/GPU is available for better performance.

    Probes ``nvidia-smi``; a missing binary (FileNotFoundError) or a
    non-zero exit code both mean "no usable NVIDIA GPU".

    Returns:
        True when an NVIDIA GPU is detected, False otherwise.
    """
    try:
        result = subprocess.run(["nvidia-smi"], capture_output=True, text=True)
        if result.returncode == 0:
            print("๐Ÿš€ NVIDIA GPU detected - will install CUDA support")
            return True
    except FileNotFoundError:
        pass
    print("๐Ÿ’ป No NVIDIA GPU detected - using CPU versions")
    return False


def production_install():
    """Install production-grade AI Text Humanizer with all features.

    Runs ten sequential install steps. Critical steps abort the whole
    installation on failure; optional ones (cleanup, NLTK data, model
    pre-download) are best-effort.

    Returns:
        True when all critical steps succeeded, False otherwise.
    """
    print("๐Ÿญ AI TEXT HUMANIZER - PRODUCTION INSTALLATION")
    print("=" * 55)
    print("๐Ÿ“‹ This will install ALL advanced features:")
    print(" โœจ Advanced semantic similarity (sentence-transformers)")
    print(" ๐Ÿง  AI paraphrasing capabilities (transformers)")
    print(" ๐Ÿš€ GPU acceleration (if available)")
    print(" ๐Ÿ“Š Full API and web interfaces")
    print("")

    # Check system
    has_gpu = check_gpu_availability()

    print("๐Ÿ”ง Starting production installation...")
    print("-" * 40)

    # Always target the interpreter running this script: a bare "python"
    # or "pip" on PATH can belong to a different installation entirely.
    py = f'"{sys.executable}"'
    pip = f"{py} -m pip"

    # Step 1: Clean existing installation (best-effort)
    print("\n๐Ÿ“ฆ STEP 1: Cleaning existing installation")
    cleanup_commands = [
        f"{pip} uninstall -y sentence-transformers transformers "
        "huggingface_hub torch torchvision torchaudio",
        f"{pip} cache purge",
    ]
    for cmd in cleanup_commands:
        run_command(cmd, "Cleaning previous installation", critical=False)

    # Step 2: Upgrade pip and install build tools
    print("\n๐Ÿ”จ STEP 2: Installing build tools")
    build_commands = [
        f"{pip} install --upgrade pip setuptools wheel",
        f"{pip} install --upgrade packaging",
    ]
    for cmd in build_commands:
        if not run_command(cmd, "Installing build tools"):
            return False

    # Step 3: Install PyTorch (choose CPU or GPU wheel index)
    print("\n๐Ÿง  STEP 3: Installing PyTorch")
    if has_gpu:
        torch_cmd = (f"{pip} install torch torchvision torchaudio "
                     "--index-url https://download.pytorch.org/whl/cu121")
    else:
        torch_cmd = (f"{pip} install torch torchvision torchaudio "
                     "--index-url https://download.pytorch.org/whl/cpu")
    if not run_command(torch_cmd, "Installing PyTorch with proper backend"):
        print("โš ๏ธ PyTorch installation failed, trying alternative...")
        if not run_command(f"{pip} install torch==2.1.0",
                           "Installing PyTorch (fallback)"):
            return False

    # Step 4: Install HuggingFace ecosystem with compatible versions
    print("\n๐Ÿค— STEP 4: Installing HuggingFace ecosystem")
    hf_commands = [
        f"{pip} install huggingface_hub==0.17.3",
        f"{pip} install tokenizers==0.14.1",
        f"{pip} install transformers==4.35.0",
        f"{pip} install accelerate==0.24.1",
    ]
    for cmd in hf_commands:
        # split()[-1] is the package spec being installed (split()[1]
        # would just say "install")
        if not run_command(cmd, f"Installing {cmd.split()[-1]}"):
            return False

    # Step 5: Install sentence transformers
    print("\n๐Ÿ”ค STEP 5: Installing Sentence Transformers")
    if not run_command(f"{pip} install sentence-transformers==2.2.2",
                       "Installing Sentence Transformers"):
        print("โš ๏ธ Trying alternative installation...")
        if not run_command(f"{pip} install sentence-transformers==2.1.0",
                           "Installing Sentence Transformers (fallback)"):
            return False

    # Step 6: Install additional ML libraries
    print("\n๐Ÿ“Š STEP 6: Installing ML libraries")
    ml_commands = [
        f"{pip} install scikit-learn==1.3.2",
        f"{pip} install numpy==1.25.2",
        f"{pip} install pandas==2.1.3",
        f"{pip} install nltk==3.8.1",
    ]
    for cmd in ml_commands:
        if not run_command(cmd, f"Installing {cmd.split()[-1]}"):
            return False

    # Step 7: Install web frameworks
    print("\n๐ŸŒ STEP 7: Installing web frameworks")
    web_commands = [
        f"{pip} install fastapi==0.104.1",
        f"{pip} install uvicorn[standard]==0.24.0",
        f"{pip} install gradio==4.7.1",
        f"{pip} install python-multipart==0.0.6",
        f"{pip} install aiofiles==23.2.1",
        f"{pip} install requests==2.31.0",
    ]
    for cmd in web_commands:
        if not run_command(cmd, f"Installing {cmd.split()[-1]}"):
            return False

    # Step 8: Install optional production libraries (best-effort)
    print("\nโšก STEP 8: Installing production libraries")
    prod_commands = [
        f"{pip} install redis==5.0.1",
        f"{pip} install psutil",
        f"{pip} install python-dotenv",
    ]
    for cmd in prod_commands:
        run_command(cmd, f"Installing {cmd.split()[-1]}", critical=False)

    # Step 9: Download NLTK data (best-effort)
    print("\n๐Ÿ“š STEP 9: Downloading NLTK data")
    nltk_downloads = [
        f"{py} -c \"import nltk; nltk.download('{corpus}', quiet=True)\""
        for corpus in ("punkt", "wordnet", "omw-1.4", "stopwords")
    ]
    for cmd in nltk_downloads:
        run_command(cmd, "Downloading NLTK data", critical=False)

    # Step 10: Pre-download models so first use is fast (best-effort)
    print("\n๐Ÿค– STEP 10: Pre-downloading models")
    model_downloads = [
        f"{py} -c \"from sentence_transformers import SentenceTransformer; "
        "SentenceTransformer('all-MiniLM-L6-v2')\"",
        f"{py} -c \"from transformers import pipeline; "
        "pipeline('text2text-generation', model='google/flan-t5-small')\"",
    ]
    for cmd in model_downloads:
        run_command(cmd, "Pre-downloading models", critical=False)

    print(f"\n๐ŸŽ‰ INSTALLATION COMPLETED!")
    return True


def test_installation():
    """Test if all components are working.

    Imports each required package, loads the two models, and probes
    CUDA availability.

    Returns:
        Dict mapping component name -> bool (True when working).
    """
    print(f"\n๐Ÿงช TESTING INSTALLATION")
    print("=" * 30)

    test_results = {}

    # Test imports via importlib instead of exec() - same effect, no
    # dynamic code execution.
    imports_to_test = [
        ("sentence_transformers", "SentenceTransformer"),
        ("transformers", "pipeline"),
        ("torch", None),
        ("sklearn", None),
        ("nltk", None),
        ("gradio", None),
        ("fastapi", None),
    ]
    for module, component in imports_to_test:
        try:
            mod = importlib.import_module(module)
            if component:
                # Mirrors "from module import component"
                getattr(mod, component)
            print(f"โœ… {module}: Import successful")
            test_results[module] = True
        except Exception as e:
            print(f"โŒ {module}: Import failed - {e}")
            test_results[module] = False

    # Test model loading
    print(f"\n๐Ÿค– Testing model loading...")
    try:
        from sentence_transformers import SentenceTransformer
        model = SentenceTransformer('all-MiniLM-L6-v2')
        print("โœ… Sentence transformer: Model loaded successfully")
        test_results['sentence_model'] = True
    except Exception as e:
        print(f"โŒ Sentence transformer: Model loading failed - {e}")
        test_results['sentence_model'] = False

    try:
        from transformers import pipeline
        paraphraser = pipeline("text2text-generation",
                               model="google/flan-t5-small")
        print("โœ… Paraphrasing model: Model loaded successfully")
        test_results['paraphrase_model'] = True
    except Exception as e:
        print(f"โŒ Paraphrasing model: Model loading failed - {e}")
        test_results['paraphrase_model'] = False

    # Test GPU availability (narrow except instead of bare except:)
    try:
        import torch
        if torch.cuda.is_available():
            print(f"โœ… CUDA: {torch.cuda.device_count()} GPU(s) available")
            test_results['gpu'] = True
        else:
            print("๐Ÿ’ป CUDA: Not available (using CPU)")
            test_results['gpu'] = False
    except Exception:
        test_results['gpu'] = False

    return test_results


def create_production_requirements():
    """Create production requirements file (requirements-production.txt)."""
    requirements = """# AI Text Humanizer - Production Requirements
# All features enabled with compatible versions

# Core ML frameworks
torch>=2.1.0
transformers==4.35.0
sentence-transformers==2.2.2
huggingface_hub==0.17.3
accelerate==0.24.1

# NLP libraries
nltk==3.8.1
scikit-learn==1.3.2
numpy==1.25.2
pandas==2.1.3

# Web frameworks
fastapi==0.104.1
uvicorn[standard]==0.24.0
gradio==4.7.1
python-multipart==0.0.6
aiofiles==23.2.1
requests==2.31.0

# Production libraries
redis==5.0.1
psutil
python-dotenv

# Build tools
setuptools
wheel
packaging
"""
    with open("requirements-production.txt", "w", encoding="utf-8") as f:
        f.write(requirements)
    print("โœ… Created requirements-production.txt")


def main():
    """Main installation process.

    Validates the environment (Python version, virtualenv), runs the
    installation, tests it, writes the requirements file, and prints a
    summary.

    Returns:
        True when at most two components failed, False otherwise.
    """
    print("๐Ÿš€ AI TEXT HUMANIZER - PRODUCTION SETUP")
    print("======================================")

    # Check Python version
    if sys.version_info < (3, 7):
        print("โŒ Python 3.7+ required")
        return False
    print(f"๐Ÿ Python {sys.version_info.major}.{sys.version_info.minor}."
          f"{sys.version_info.micro} detected")

    # Check virtual environment (real_prefix: virtualenv; base_prefix: venv)
    in_venv = hasattr(sys, 'real_prefix') or (
        hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix)
    if not in_venv:
        print("โš ๏ธ Warning: Not in virtual environment")
        response = input("Continue? (y/n): ").lower().strip()
        if response != 'y':
            print("๐Ÿ‘‹ Please create a virtual environment first")
            return False
    else:
        print("โœ… Virtual environment detected")

    # Start installation
    if not production_install():
        print("\nโŒ Installation failed!")
        return False

    # Test installation
    test_results = test_installation()

    # Create requirements file
    create_production_requirements()

    # Summary
    print(f"\n๐Ÿ“Š INSTALLATION SUMMARY")
    print("=" * 30)
    success_count = sum(1 for v in test_results.values() if v)
    total_count = len(test_results)
    print(f"โœ… {success_count}/{total_count} components working")

    if test_results.get('sentence_model') and test_results.get('paraphrase_model'):
        print("๐ŸŽ‰ ALL ADVANCED FEATURES ENABLED!")
        print(" โ€ข Advanced semantic similarity โœ…")
        print(" โ€ข AI paraphrasing capabilities โœ…")
        print(" โ€ข Production-ready performance โœ…")
    elif test_results.get('sentence_model'):
        print("โš ๏ธ Advanced similarity enabled, paraphrasing needs attention")
    elif test_results.get('paraphrase_model'):
        print("โš ๏ธ Paraphrasing enabled, similarity needs attention")
    else:
        print("โŒ Advanced features need troubleshooting")

    print(f"\n๐ŸŽฏ NEXT STEPS:")
    print("1. Test: python text_humanizer_robust.py")
    print("2. Run API: python fastapi_server.py")
    print("3. Run web UI: python gradio_app.py")

    return success_count >= total_count - 2  # Allow 2 optional failures


if __name__ == "__main__":
    try:
        success = main()
        if success:
            print(f"\n๐ŸŽ‰ Production installation successful!")
        else:
            print(f"\nโŒ Production installation needs attention")
    except KeyboardInterrupt:
        print(f"\n๐Ÿ‘‹ Installation cancelled")
    except Exception as e:
        print(f"\nโŒ Unexpected error: {e}")