# (page-scrape artifact removed: "Spaces / Sleeping" header from the hosting UI — not part of this script)
| #!/usr/bin/env python3 | |
| """ | |
| Production Installation Script for AI Text Humanizer | |
| Ensures all advanced features are properly installed and working | |
| """ | |
import importlib
import os
import subprocess
import sys
import time
def run_command(cmd, description, critical=True):
    """Execute a shell command and report its outcome.

    Prints a progress line, runs *cmd*, and echoes captured stdout/stderr.
    Returns True on success. On failure, returns False only when the step
    is *critical*; optional steps report failure but still return True so
    the surrounding install sequence keeps going.

    NOTE(review): shell=True is used here; all call sites pass internally
    defined command strings, never user input — keep it that way.
    """
    print(f"π {description}...")
    try:
        completed = subprocess.run(
            cmd, shell=True, check=True, capture_output=True, text=True
        )
    except subprocess.CalledProcessError as err:
        print(f"β {description} - FAILED")
        print(f" Error: {err.stderr.strip()}")
        # Equivalent to: return False if critical else True
        return not critical
    print(f"β {description} - SUCCESS")
    captured = completed.stdout.strip()
    if captured:
        print(f" Output: {captured}")
    return True
def check_gpu_availability():
    """Probe for an NVIDIA GPU by invoking nvidia-smi.

    Returns True when nvidia-smi exists and exits cleanly (GPU present),
    False when it is missing or fails — in which case CPU-only packages
    will be installed instead.
    """
    gpu_present = False
    try:
        probe = subprocess.run(["nvidia-smi"], capture_output=True, text=True)
        gpu_present = probe.returncode == 0
    except FileNotFoundError:
        # nvidia-smi not on PATH at all: definitely no usable NVIDIA driver.
        pass
    if gpu_present:
        print("π NVIDIA GPU detected - will install CUDA support")
        return True
    print("π» No NVIDIA GPU detected - using CPU versions")
    return False
def production_install():
    """Install the production-grade AI Text Humanizer stack end to end.

    Runs a ten-step, strictly ordered pip-based install: cleanup, build
    tools, PyTorch (CUDA or CPU backend), the HuggingFace ecosystem,
    sentence-transformers, general ML libraries, web frameworks, optional
    production libraries, NLTK corpora, and model pre-downloads.

    Returns:
        True when every critical step succeeded; False as soon as a
        critical step (and its fallback, where one exists) fails.
        Non-critical steps may fail without aborting the install.
    """
    print("π AI TEXT HUMANIZER - PRODUCTION INSTALLATION")
    print("=" * 55)
    print("π This will install ALL advanced features:")
    print(" β¨ Advanced semantic similarity (sentence-transformers)")
    print(" π§ AI paraphrasing capabilities (transformers)")
    print(" π GPU acceleration (if available)")
    print(" π Full API and web interfaces")
    print("")
    # Check system — decides which PyTorch index URL to use in Step 3.
    has_gpu = check_gpu_availability()
    print("π§ Starting production installation...")
    print("-" * 40)
    # Step 1: Clean existing installation
    # Non-critical: on a fresh environment there is nothing to uninstall.
    print("\nπ¦ STEP 1: Cleaning existing installation")
    cleanup_commands = [
        "pip uninstall -y sentence-transformers transformers huggingface_hub torch torchvision torchaudio",
        "pip cache purge"
    ]
    for cmd in cleanup_commands:
        run_command(cmd, "Cleaning previous installation", critical=False)
    # Step 2: Upgrade pip and install build tools
    # Critical: later source builds depend on current setuptools/wheel.
    print("\nπ¨ STEP 2: Installing build tools")
    build_commands = [
        "pip install --upgrade pip setuptools wheel",
        "pip install --upgrade packaging"
    ]
    for cmd in build_commands:
        if not run_command(cmd, "Installing build tools"):
            return False
    # Step 3: Install PyTorch (choose CPU or GPU version)
    # The index URL selects the wheel backend: cu121 for CUDA 12.1, cpu otherwise.
    print("\nπ§ STEP 3: Installing PyTorch")
    if has_gpu:
        torch_cmd = "pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121"
    else:
        torch_cmd = "pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu"
    if not run_command(torch_cmd, "Installing PyTorch with proper backend"):
        # Fallback: a pinned default-index install; abort only if that fails too.
        print("β οΈ PyTorch installation failed, trying alternative...")
        if not run_command("pip install torch==2.1.0", "Installing PyTorch (fallback)"):
            return False
    # Step 4: Install HuggingFace ecosystem with compatible versions
    # Pinned versions — these four are known to be mutually compatible.
    print("\nπ€ STEP 4: Installing HuggingFace ecosystem")
    hf_commands = [
        "pip install huggingface_hub==0.17.3",
        "pip install tokenizers==0.14.1",
        "pip install transformers==4.35.0",
        "pip install accelerate==0.24.1"
    ]
    for cmd in hf_commands:
        # cmd.split()[1] is the "install" token's argument, i.e. the package spec.
        if not run_command(cmd, f"Installing {cmd.split()[1]}"):
            return False
    # Step 5: Install sentence transformers
    # Primary pin 2.2.2, with an older fallback pin before giving up.
    print("\nπ€ STEP 5: Installing Sentence Transformers")
    if not run_command("pip install sentence-transformers==2.2.2", "Installing Sentence Transformers"):
        print("β οΈ Trying alternative installation...")
        if not run_command("pip install sentence-transformers==2.1.0", "Installing Sentence Transformers (fallback)"):
            return False
    # Step 6: Install additional ML libraries
    print("\nπ STEP 6: Installing ML libraries")
    ml_commands = [
        "pip install scikit-learn==1.3.2",
        "pip install numpy==1.25.2",
        "pip install pandas==2.1.3",
        "pip install nltk==3.8.1"
    ]
    for cmd in ml_commands:
        if not run_command(cmd, f"Installing {cmd.split()[1]}"):
            return False
    # Step 7: Install web frameworks
    print("\nπ STEP 7: Installing web frameworks")
    web_commands = [
        "pip install fastapi==0.104.1",
        "pip install uvicorn[standard]==0.24.0",
        "pip install gradio==4.7.1",
        "pip install python-multipart==0.0.6",
        "pip install aiofiles==23.2.1",
        "pip install requests==2.31.0"
    ]
    for cmd in web_commands:
        if not run_command(cmd, f"Installing {cmd.split()[1]}"):
            return False
    # Step 8: Install optional production libraries
    # Non-critical: caching/monitoring extras the app can run without.
    print("\nβ‘ STEP 8: Installing production libraries")
    prod_commands = [
        "pip install redis==5.0.1",
        "pip install psutil",
        "pip install python-dotenv"
    ]
    for cmd in prod_commands:
        run_command(cmd, f"Installing {cmd.split()[1]}", critical=False)
    # Step 9: Download NLTK data
    # Non-critical: missing corpora can be fetched later at runtime.
    print("\nπ STEP 9: Downloading NLTK data")
    nltk_downloads = [
        "python -c \"import nltk; nltk.download('punkt', quiet=True)\"",
        "python -c \"import nltk; nltk.download('wordnet', quiet=True)\"",
        "python -c \"import nltk; nltk.download('omw-1.4', quiet=True)\"",
        "python -c \"import nltk; nltk.download('stopwords', quiet=True)\""
    ]
    for cmd in nltk_downloads:
        run_command(cmd, "Downloading NLTK data", critical=False)
    # Step 10: Pre-download models
    # Non-critical warm-up so first application start doesn't block on downloads.
    print("\nπ€ STEP 10: Pre-downloading models")
    model_downloads = [
        "python -c \"from sentence_transformers import SentenceTransformer; SentenceTransformer('all-MiniLM-L6-v2')\"",
        "python -c \"from transformers import pipeline; pipeline('text2text-generation', model='google/flan-t5-small')\""
    ]
    for cmd in model_downloads:
        run_command(cmd, "Pre-downloading models", critical=False)
    print(f"\nπ INSTALLATION COMPLETED!")
    return True
def test_installation():
    """Smoke-test the installed stack.

    Checks that each key package imports (and exposes its expected entry
    point), that the similarity and paraphrasing models load, and whether
    CUDA is usable. Every probe failure is recorded rather than raised.

    Returns:
        dict mapping component name -> bool success, consumed by main()
        to build the installation summary.
    """
    print(f"\nπ§ͺ TESTING INSTALLATION")
    print("=" * 30)
    test_results = {}
    # Test imports: (module name, optional attribute that must exist in it).
    imports_to_test = [
        ("sentence_transformers", "SentenceTransformer"),
        ("transformers", "pipeline"),
        ("torch", None),
        ("sklearn", None),
        ("nltk", None),
        ("gradio", None),
        ("fastapi", None)
    ]
    for module, component in imports_to_test:
        try:
            # importlib/getattr replaces the previous exec() of built strings:
            # same observable behavior, no dynamic code execution.
            imported = importlib.import_module(module)
            if component:
                # Mirrors "from module import component" — attribute must exist.
                getattr(imported, component)
            print(f"β {module}: Import successful")
            test_results[module] = True
        except Exception as e:
            print(f"β {module}: Import failed - {e}")
            test_results[module] = False
    # Test model loading (may download weights on first run).
    print(f"\nπ€ Testing model loading...")
    try:
        from sentence_transformers import SentenceTransformer
        model = SentenceTransformer('all-MiniLM-L6-v2')
        print("β Sentence transformer: Model loaded successfully")
        test_results['sentence_model'] = True
    except Exception as e:
        print(f"β Sentence transformer: Model loading failed - {e}")
        test_results['sentence_model'] = False
    try:
        from transformers import pipeline
        paraphraser = pipeline("text2text-generation", model="google/flan-t5-small")
        print("β Paraphrasing model: Model loaded successfully")
        test_results['paraphrase_model'] = True
    except Exception as e:
        print(f"β Paraphrasing model: Model loading failed - {e}")
        test_results['paraphrase_model'] = False
    # Test GPU availability. Narrowed from a bare except: a bare except also
    # swallowed KeyboardInterrupt/SystemExit; Exception covers the real
    # failure mode (torch missing or broken).
    try:
        import torch
        if torch.cuda.is_available():
            print(f"β CUDA: {torch.cuda.device_count()} GPU(s) available")
            test_results['gpu'] = True
        else:
            print("π» CUDA: Not available (using CPU)")
            test_results['gpu'] = False
    except Exception:
        test_results['gpu'] = False
    return test_results
def create_production_requirements():
    """Write the pinned production dependency list to requirements-production.txt."""
    pinned = """# AI Text Humanizer - Production Requirements
# All features enabled with compatible versions
# Core ML frameworks
torch>=2.1.0
transformers==4.35.0
sentence-transformers==2.2.2
huggingface_hub==0.17.3
accelerate==0.24.1
# NLP libraries
nltk==3.8.1
scikit-learn==1.3.2
numpy==1.25.2
pandas==2.1.3
# Web frameworks
fastapi==0.104.1
uvicorn[standard]==0.24.0
gradio==4.7.1
python-multipart==0.0.6
aiofiles==23.2.1
requests==2.31.0
# Production libraries
redis==5.0.1
psutil
python-dotenv
# Build tools
setuptools
wheel
packaging
"""
    target = "requirements-production.txt"
    with open(target, "w") as handle:
        handle.write(pinned)
    print("β Created requirements-production.txt")
def main():
    """Drive the full setup: preflight checks, install, test, summarize.

    Verifies the Python version and virtual-environment status (prompting
    interactively before proceeding outside a venv), runs the installer,
    smoke-tests the result, writes requirements-production.txt, and prints
    a summary.

    Returns:
        True when at most two probed components failed (the two optional
        model loads are allowed to fail); False on preflight or install
        failure.
    """
    print("π AI TEXT HUMANIZER - PRODUCTION SETUP")
    print("======================================")
    # Check Python version
    if sys.version_info < (3, 7):
        print("β Python 3.7+ required")
        return False
    print(f"π Python {sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro} detected")
    # Check virtual environment.
    # real_prefix covers legacy virtualenv; base_prefix != prefix covers venv.
    in_venv = hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix)
    if not in_venv:
        # Installing into the system interpreter is risky — ask first.
        print("β οΈ Warning: Not in virtual environment")
        response = input("Continue? (y/n): ").lower().strip()
        if response != 'y':
            print("π Please create a virtual environment first")
            return False
    else:
        print("β Virtual environment detected")
    # Start installation
    if not production_install():
        print("\nβ Installation failed!")
        return False
    # Test installation
    test_results = test_installation()
    # Create requirements file
    create_production_requirements()
    # Summary
    print(f"\nπ INSTALLATION SUMMARY")
    print("=" * 30)
    success_count = sum(1 for v in test_results.values() if v)
    total_count = len(test_results)
    print(f"β {success_count}/{total_count} components working")
    # The two model probes decide which "advanced features" message to show.
    if test_results.get('sentence_model') and test_results.get('paraphrase_model'):
        print("π ALL ADVANCED FEATURES ENABLED!")
        print(" β’ Advanced semantic similarity β ")
        print(" β’ AI paraphrasing capabilities β ")
        print(" β’ Production-ready performance β ")
    elif test_results.get('sentence_model'):
        print("β οΈ Advanced similarity enabled, paraphrasing needs attention")
    elif test_results.get('paraphrase_model'):
        print("β οΈ Paraphrasing enabled, similarity needs attention")
    else:
        print("β Advanced features need troubleshooting")
    print(f"\nπ― NEXT STEPS:")
    print("1. Test: python text_humanizer_robust.py")
    print("2. Run API: python fastapi_server.py")
    print("3. Run web UI: python gradio_app.py")
    return success_count >= total_count - 2  # Allow 2 optional failures
| if __name__ == "__main__": | |
| try: | |
| success = main() | |
| if success: | |
| print(f"\nπ Production installation successful!") | |
| else: | |
| print(f"\nβ Production installation needs attention") | |
| except KeyboardInterrupt: | |
| print(f"\nπ Installation cancelled") | |
| except Exception as e: | |
| print(f"\nβ Unexpected error: {e}") |