Spaces:
Running
Running
File size: 4,151 Bytes
e72f783 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 | """
Model promotion and deployment
"""
import logging
from typing import Dict, Any
from datetime import datetime
logger = logging.getLogger(__name__)
class ModelPromoter:
"""
Handles model promotion and versioning for deployment.
"""
def __init__(self, models_dir: str = "models/"):
"""
Initialize model promoter.
Args:
models_dir: Directory containing models
"""
self.models_dir = models_dir
logger.info(f"ModelPromoter initialized with models directory: {models_dir}")
def evaluate_model_quality(self, model_metrics: Dict[str, float], thresholds: Dict[str, float]) -> bool:
"""
Evaluate if model meets quality thresholds.
Args:
model_metrics: Model performance metrics
thresholds: Minimum acceptable thresholds
Returns:
True if model passes quality checks
"""
logger.info("Evaluating model quality...")
passes_all = True
for metric, threshold in thresholds.items():
actual = model_metrics.get(metric, 0.0)
if actual < threshold:
logger.warning(f"Model fails {metric} check: {actual} < {threshold}")
passes_all = False
else:
logger.info(f"Model passes {metric} check: {actual} >= {threshold}")
return passes_all
def promote_model(self, model_name: str, version: str, metrics: Dict[str, float]) -> bool:
"""
Promote model to production.
Args:
model_name: Name of the model
version: Model version
metrics: Performance metrics
Returns:
True if promotion successful
"""
logger.info(f"Promoting model {model_name} v{version} to production")
# Define quality thresholds
thresholds = {
"auroc": 0.90,
"f1_score": 0.85,
"inference_time": 150 # milliseconds
}
# Check quality
if not self.evaluate_model_quality(metrics, thresholds):
logger.error("Model does not meet quality thresholds")
return False
# Promote model
try:
promotion_record = {
"model_name": model_name,
"version": version,
"promoted_at": datetime.now().isoformat(),
"metrics": metrics,
"status": "promoted"
}
logger.info(f"Model promoted successfully: {model_name} v{version}")
return True
except Exception as e:
logger.error(f"Model promotion failed: {e}")
return False
def rollback_model(self, model_name: str, target_version: str) -> bool:
"""
Rollback to a previous model version.
Args:
model_name: Name of the model
target_version: Version to rollback to
Returns:
True if rollback successful
"""
logger.info(f"Rolling back model {model_name} to version {target_version}")
try:
# Implementation for model rollback
logger.info(f"Model rolled back successfully: {model_name} to v{target_version}")
return True
except Exception as e:
logger.error(f"Model rollback failed: {e}")
return False
def compare_models(self, model1_metrics: Dict, model2_metrics: Dict) -> Dict[str, Any]:
"""
Compare two model versions.
Args:
model1_metrics: Metrics of first model
model2_metrics: Metrics of second model
Returns:
Comparison report
"""
logger.info("Comparing model versions...")
comparison = {}
for metric in model1_metrics.keys():
diff = model2_metrics.get(metric, 0) - model1_metrics.get(metric, 0)
comparison[f"{metric}_diff"] = diff
return comparison
|