# AIHumanizer / install_production.py
# Uploaded by Jay-Rajput — "adv humanizer" (commit 5c9a55b)
# NOTE(review): the four lines above were web-page scrape residue, not valid
# Python; they are commented out so the file parses. Ideally the shebang on
# the next line should be the very first line of the file.
#!/usr/bin/env python3
"""
Production Installation Script for AI Text Humanizer
Ensures all advanced features are properly installed and working
"""
import subprocess
import sys
import os
import time
def run_command(cmd, description, critical=True):
    """Run a shell command, reporting progress and outcome on stdout.

    Args:
        cmd: Shell command string (executed with shell=True).
        description: Human-readable label printed before/after the run.
        critical: When True, a failure makes this return False; when
            False, failures are reported but still return True so
            optional steps never abort the caller.

    Returns:
        True on success (or non-critical failure), False on critical failure.
    """
    print(f"πŸ”„ {description}...")
    try:
        completed = subprocess.run(
            cmd, shell=True, check=True, capture_output=True, text=True
        )
    except subprocess.CalledProcessError as err:
        # capture_output + text guarantees stderr is a str here
        print(f"❌ {description} - FAILED")
        print(f" Error: {err.stderr.strip()}")
        return not critical
    print(f"βœ… {description} - SUCCESS")
    captured = completed.stdout.strip()
    if captured:
        print(f" Output: {captured}")
    return True
def check_gpu_availability():
    """Probe for an NVIDIA GPU by invoking `nvidia-smi`.

    Returns:
        True when `nvidia-smi` exists and exits 0, False otherwise
        (including when the binary is not installed at all).
    """
    gpu_present = False
    try:
        probe = subprocess.run(["nvidia-smi"], capture_output=True, text=True)
        gpu_present = probe.returncode == 0
    except FileNotFoundError:
        # nvidia-smi binary missing -> no NVIDIA driver stack installed
        pass
    if gpu_present:
        print("πŸš€ NVIDIA GPU detected - will install CUDA support")
        return True
    print("πŸ’» No NVIDIA GPU detected - using CPU versions")
    return False
def production_install():
    """Install the production-grade AI Text Humanizer stack.

    Executes a ten-step pip-based installation: cleanup, build tools,
    PyTorch (CPU or CUDA backend), the HuggingFace ecosystem,
    sentence-transformers, ML libraries, web frameworks, optional
    production libraries, NLTK data, and model pre-download.

    Returns:
        True when every critical step succeeded, False otherwise.
        Optional steps (cleanup, NLTK data, model pre-download) never
        cause a False return.
    """
    print("🏭 AI TEXT HUMANIZER - PRODUCTION INSTALLATION")
    print("=" * 55)
    print("πŸ“‹ This will install ALL advanced features:")
    print(" ✨ Advanced semantic similarity (sentence-transformers)")
    print(" 🧠 AI paraphrasing capabilities (transformers)")
    print(" πŸš€ GPU acceleration (if available)")
    print(" πŸ“Š Full API and web interfaces")
    print("")
    # Check system
    has_gpu = check_gpu_availability()
    print("πŸ”§ Starting production installation...")
    print("-" * 40)
    # Step 1: Clean existing installation (best-effort; nothing may be installed yet)
    print("\nπŸ“¦ STEP 1: Cleaning existing installation")
    cleanup_commands = [
        "pip uninstall -y sentence-transformers transformers huggingface_hub torch torchvision torchaudio",
        "pip cache purge"
    ]
    for cmd in cleanup_commands:
        run_command(cmd, "Cleaning previous installation", critical=False)
    # Step 2: Upgrade pip and install build tools
    print("\nπŸ”¨ STEP 2: Installing build tools")
    build_commands = [
        "pip install --upgrade pip setuptools wheel",
        "pip install --upgrade packaging"
    ]
    for cmd in build_commands:
        if not run_command(cmd, "Installing build tools"):
            return False
    # Step 3: Install PyTorch (choose CPU or GPU wheel index based on probe above)
    print("\n🧠 STEP 3: Installing PyTorch")
    if has_gpu:
        torch_cmd = "pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121"
    else:
        torch_cmd = "pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu"
    if not run_command(torch_cmd, "Installing PyTorch with proper backend"):
        print("⚠️ PyTorch installation failed, trying alternative...")
        if not run_command("pip install torch==2.1.0", "Installing PyTorch (fallback)"):
            return False
    # Step 4: Install HuggingFace ecosystem with compatible pinned versions
    print("\nπŸ€— STEP 4: Installing HuggingFace ecosystem")
    hf_commands = [
        "pip install huggingface_hub==0.17.3",
        "pip install tokenizers==0.14.1",
        "pip install transformers==4.35.0",
        "pip install accelerate==0.24.1"
    ]
    for cmd in hf_commands:
        # BUG FIX: cmd.split()[2] is the package spec; [1] was the word "install",
        # so every step used to print "Installing install".
        if not run_command(cmd, f"Installing {cmd.split()[2]}"):
            return False
    # Step 5: Install sentence transformers (older pin as fallback)
    print("\nπŸ”€ STEP 5: Installing Sentence Transformers")
    if not run_command("pip install sentence-transformers==2.2.2", "Installing Sentence Transformers"):
        print("⚠️ Trying alternative installation...")
        if not run_command("pip install sentence-transformers==2.1.0", "Installing Sentence Transformers (fallback)"):
            return False
    # Step 6: Install additional ML libraries
    print("\nπŸ“Š STEP 6: Installing ML libraries")
    ml_commands = [
        "pip install scikit-learn==1.3.2",
        "pip install numpy==1.25.2",
        "pip install pandas==2.1.3",
        "pip install nltk==3.8.1"
    ]
    for cmd in ml_commands:
        if not run_command(cmd, f"Installing {cmd.split()[2]}"):
            return False
    # Step 7: Install web frameworks
    print("\n🌐 STEP 7: Installing web frameworks")
    web_commands = [
        "pip install fastapi==0.104.1",
        "pip install uvicorn[standard]==0.24.0",
        "pip install gradio==4.7.1",
        "pip install python-multipart==0.0.6",
        "pip install aiofiles==23.2.1",
        "pip install requests==2.31.0"
    ]
    for cmd in web_commands:
        if not run_command(cmd, f"Installing {cmd.split()[2]}"):
            return False
    # Step 8: Install optional production libraries (non-critical)
    print("\n⚑ STEP 8: Installing production libraries")
    prod_commands = [
        "pip install redis==5.0.1",
        "pip install psutil",
        "pip install python-dotenv"
    ]
    for cmd in prod_commands:
        run_command(cmd, f"Installing {cmd.split()[2]}", critical=False)
    # Step 9: Download NLTK data (non-critical; runtime can re-download)
    print("\nπŸ“š STEP 9: Downloading NLTK data")
    nltk_downloads = [
        "python -c \"import nltk; nltk.download('punkt', quiet=True)\"",
        "python -c \"import nltk; nltk.download('wordnet', quiet=True)\"",
        "python -c \"import nltk; nltk.download('omw-1.4', quiet=True)\"",
        "python -c \"import nltk; nltk.download('stopwords', quiet=True)\""
    ]
    for cmd in nltk_downloads:
        run_command(cmd, "Downloading NLTK data", critical=False)
    # Step 10: Pre-download models so first real run is fast (non-critical)
    print("\nπŸ€– STEP 10: Pre-downloading models")
    model_downloads = [
        "python -c \"from sentence_transformers import SentenceTransformer; SentenceTransformer('all-MiniLM-L6-v2')\"",
        "python -c \"from transformers import pipeline; pipeline('text2text-generation', model='google/flan-t5-small')\""
    ]
    for cmd in model_downloads:
        run_command(cmd, "Pre-downloading models", critical=False)
    print(f"\nπŸŽ‰ INSTALLATION COMPLETED!")
    return True
def test_installation():
    """Verify every installed component imports and loads correctly.

    Checks (1) that each expected module imports and exposes its key
    symbol, (2) that the two models load, and (3) CUDA availability.

    Returns:
        dict mapping component name -> bool working status.
    """
    # importlib.import_module replaces the original exec() on string-built
    # code, which was both slow and an injection-prone anti-pattern.
    import importlib

    print(f"\nπŸ§ͺ TESTING INSTALLATION")
    print("=" * 30)
    test_results = {}
    # Test imports: (module name, optional attribute that must exist on it)
    imports_to_test = [
        ("sentence_transformers", "SentenceTransformer"),
        ("transformers", "pipeline"),
        ("torch", None),
        ("sklearn", None),
        ("nltk", None),
        ("gradio", None),
        ("fastapi", None)
    ]
    for module, component in imports_to_test:
        try:
            mod = importlib.import_module(module)
            if component:
                # Mirrors "from module import component": AttributeError
                # here marks the module as broken, same as an ImportError.
                getattr(mod, component)
            print(f"βœ… {module}: Import successful")
            test_results[module] = True
        except Exception as e:
            print(f"❌ {module}: Import failed - {e}")
            test_results[module] = False
    # Test model loading (downloads weights on first run)
    print(f"\nπŸ€– Testing model loading...")
    try:
        from sentence_transformers import SentenceTransformer
        model = SentenceTransformer('all-MiniLM-L6-v2')
        print("βœ… Sentence transformer: Model loaded successfully")
        test_results['sentence_model'] = True
    except Exception as e:
        print(f"❌ Sentence transformer: Model loading failed - {e}")
        test_results['sentence_model'] = False
    try:
        from transformers import pipeline
        paraphraser = pipeline("text2text-generation", model="google/flan-t5-small")
        print("βœ… Paraphrasing model: Model loaded successfully")
        test_results['paraphrase_model'] = True
    except Exception as e:
        print(f"❌ Paraphrasing model: Model loading failed - {e}")
        test_results['paraphrase_model'] = False
    # Test GPU availability (was a bare "except:" which would even swallow
    # KeyboardInterrupt; narrowed to Exception)
    try:
        import torch
        if torch.cuda.is_available():
            print(f"βœ… CUDA: {torch.cuda.device_count()} GPU(s) available")
            test_results['gpu'] = True
        else:
            print("πŸ’» CUDA: Not available (using CPU)")
            test_results['gpu'] = False
    except Exception:
        test_results['gpu'] = False
    return test_results
def create_production_requirements():
    """Write requirements-production.txt pinning all production dependencies.

    The file content is identical to the historical triple-quoted template:
    pinned ML/NLP/web packages plus unpinned build and utility tools.
    """
    spec_lines = [
        "# AI Text Humanizer - Production Requirements",
        "# All features enabled with compatible versions",
        "# Core ML frameworks",
        "torch>=2.1.0",
        "transformers==4.35.0",
        "sentence-transformers==2.2.2",
        "huggingface_hub==0.17.3",
        "accelerate==0.24.1",
        "# NLP libraries",
        "nltk==3.8.1",
        "scikit-learn==1.3.2",
        "numpy==1.25.2",
        "pandas==2.1.3",
        "# Web frameworks",
        "fastapi==0.104.1",
        "uvicorn[standard]==0.24.0",
        "gradio==4.7.1",
        "python-multipart==0.0.6",
        "aiofiles==23.2.1",
        "requests==2.31.0",
        "# Production libraries",
        "redis==5.0.1",
        "psutil",
        "python-dotenv",
        "# Build tools",
        "setuptools",
        "wheel",
        "packaging",
    ]
    with open("requirements-production.txt", "w") as handle:
        handle.write("\n".join(spec_lines) + "\n")
    print("βœ… Created requirements-production.txt")
def main() -> bool:
    """Main installation process: environment checks, install, test, summary.

    Returns:
        True when the install is considered usable (at most 2 component
        checks failed), False on version/venv abort or a critical
        installation failure.
    """
    print("πŸš€ AI TEXT HUMANIZER - PRODUCTION SETUP")
    print("======================================")
    # Check Python version before touching pip
    if sys.version_info < (3, 7):
        print("❌ Python 3.7+ required")
        return False
    print(f"🐍 Python {sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro} detected")
    # Check virtual environment: 'real_prefix' is the legacy virtualenv
    # marker; base_prefix != prefix is the stdlib venv marker
    in_venv = hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix)
    if not in_venv:
        # Interactive confirmation: installing into the system Python is risky
        print("⚠️ Warning: Not in virtual environment")
        response = input("Continue? (y/n): ").lower().strip()
        if response != 'y':
            print("πŸ‘‹ Please create a virtual environment first")
            return False
    else:
        print("βœ… Virtual environment detected")
    # Start installation; any critical step failure aborts here
    if not production_install():
        print("\n❌ Installation failed!")
        return False
    # Test installation (import checks, model loads, CUDA probe)
    test_results = test_installation()
    # Create requirements file for reproducible redeploys
    create_production_requirements()
    # Summary
    print(f"\nπŸ“Š INSTALLATION SUMMARY")
    print("=" * 30)
    success_count = sum(1 for v in test_results.values() if v)
    total_count = len(test_results)
    print(f"βœ… {success_count}/{total_count} components working")
    # Feature matrix depends on the two model checks from test_installation()
    if test_results.get('sentence_model') and test_results.get('paraphrase_model'):
        print("πŸŽ‰ ALL ADVANCED FEATURES ENABLED!")
        print(" β€’ Advanced semantic similarity βœ…")
        print(" β€’ AI paraphrasing capabilities βœ…")
        print(" β€’ Production-ready performance βœ…")
    elif test_results.get('sentence_model'):
        print("⚠️ Advanced similarity enabled, paraphrasing needs attention")
    elif test_results.get('paraphrase_model'):
        print("⚠️ Paraphrasing enabled, similarity needs attention")
    else:
        print("❌ Advanced features need troubleshooting")
    print(f"\n🎯 NEXT STEPS:")
    print("1. Test: python text_humanizer_robust.py")
    print("2. Run API: python fastapi_server.py")
    print("3. Run web UI: python gradio_app.py")
    return success_count >= total_count - 2  # Allow 2 optional failures
# Script entry point: run the full installation and report the outcome.
if __name__ == "__main__":
    try:
        success = main()
        if success:
            print(f"\nπŸŽ‰ Production installation successful!")
        else:
            print(f"\n❌ Production installation needs attention")
    except KeyboardInterrupt:
        # User pressed Ctrl+C mid-install; exit quietly without a traceback
        print(f"\nπŸ‘‹ Installation cancelled")
    except Exception as e:
        # Broad catch is acceptable at this top-level boundary: report the
        # error in a friendly form instead of a raw traceback
        print(f"\n❌ Unexpected error: {e}")