| """ |
| Embedded Model Training for HF Spaces |
| Fixed version with dynamic column mapping for SAP SALT dataset |
| """ |
|
|
| import pandas as pd |
| import numpy as np |
| from sklearn.ensemble import RandomForestClassifier |
| from sklearn.model_selection import train_test_split |
| from sklearn.preprocessing import LabelEncoder |
| import joblib |
| import json |
| import streamlit as st |
| from pathlib import Path |
| from datetime import datetime |
|
|
| class EmbeddedChurnTrainer: |
| """Embedded trainer with dynamic column mapping for real SAP SALT data""" |
| |
| def __init__(self): |
| self.model_path = Path('models/churn_model_v1.pkl') |
| self.metadata_path = Path('models/model_metadata.json') |
| self.model = None |
| self.label_encoders = {} |
| self.feature_columns = [] |
| self.column_mapping = {} |
| |
| def model_exists(self): |
| """Check if trained model exists""" |
| return self.model_path.exists() and self.metadata_path.exists() |
| |
| @st.cache_data |
| def load_sap_data(_self): |
| """Load real SAP SALT dataset and inspect its structure""" |
| try: |
| from datasets import load_dataset |
| |
| st.info("π Loading SAP SALT dataset from Hugging Face...") |
| |
| |
| dataset = load_dataset("SAP/SALT", split="train") |
| data_df = dataset.to_pandas() |
| |
| |
| st.info(f"π Dataset columns: {list(data_df.columns)}") |
| st.info(f"π Dataset shape: {data_df.shape}") |
| |
| |
| _self.column_mapping = _self._create_column_mapping(data_df.columns) |
| st.info(f"π Column mapping: {_self.column_mapping}") |
| |
| |
| data_df = _self._add_aggregated_fields(data_df) |
| |
| st.success(f"β
Loaded {len(data_df)} records from SAP SALT dataset") |
| return data_df |
| |
| except ImportError: |
| st.error("β Hugging Face datasets library not available") |
| raise RuntimeError("datasets library required") |
| |
| except Exception as e: |
| if "gated" in str(e).lower() or "authentication" in str(e).lower(): |
| st.error("π **SAP SALT Dataset Access Required**") |
| st.info(""" |
| **To access SAP SALT dataset:** |
| 1. Visit: https://huggingface.co/datasets/SAP/SALT |
| 2. Click "Agree and access repository" |
| 3. Add HF token to Space secrets: `HF_TOKEN` |
| 4. Restart the Space |
| """) |
| else: |
| st.error(f"β Failed to load SAP SALT dataset: {str(e)}") |
| raise |
| |
| def _create_column_mapping(self, available_columns): |
| """Create mapping from expected columns to available columns""" |
| cols = [col.upper() for col in available_columns] |
| available_upper = {col.upper(): col for col in available_columns} |
| |
| mapping = {} |
| |
| |
| customer_candidates = ['CUSTOMER', 'SOLDTOPARTY', 'CUSTOMERID', 'CUSTOMER_ID'] |
| for candidate in customer_candidates: |
| if candidate in cols: |
| mapping['Customer'] = available_upper[candidate] |
| break |
| else: |
| mapping['Customer'] = available_columns[0] if available_columns else 'Customer' |
| |
| |
| name_candidates = ['CUSTOMERNAME', 'CUSTOMER_NAME', 'NAME', 'COMPANYNAME'] |
| for candidate in name_candidates: |
| if candidate in cols: |
| mapping['CustomerName'] = available_upper[candidate] |
| break |
| else: |
| mapping['CustomerName'] = None |
| |
| |
| country_candidates = ['COUNTRY', 'COUNTRYKEY', 'COUNTRY_CODE', 'LAND1'] |
| for candidate in country_candidates: |
| if candidate in cols: |
| mapping['Country'] = available_upper[candidate] |
| break |
| else: |
| mapping['Country'] = None |
| |
| |
| group_candidates = ['CUSTOMERGROUP', 'CUSTOMER_GROUP', 'CUSTOMERCLASSIFICATION', 'KTOKD'] |
| for candidate in group_candidates: |
| if candidate in cols: |
| mapping['CustomerGroup'] = available_upper[candidate] |
| break |
| else: |
| mapping['CustomerGroup'] = None |
| |
| |
| doc_candidates = ['SALESDOCUMENT', 'SALES_DOCUMENT', 'VBELN', 'DOCUMENTNUMBER'] |
| for candidate in doc_candidates: |
| if candidate in cols: |
| mapping['SalesDocument'] = available_upper[candidate] |
| break |
| else: |
| mapping['SalesDocument'] = None |
| |
| |
| date_candidates = ['CREATIONDATE', 'CREATION_DATE', 'ERDAT', 'REQUESTEDDELIVERYDATE', 'DATE'] |
| for candidate in date_candidates: |
| if candidate in cols: |
| mapping['CreationDate'] = available_upper[candidate] |
| break |
| else: |
| mapping['CreationDate'] = None |
| |
| return mapping |
| |
| def _add_aggregated_fields(self, data): |
| """Add customer-level aggregations using dynamic column mapping""" |
| |
| customer_col = self.column_mapping.get('Customer') |
| date_col = self.column_mapping.get('CreationDate') |
| sales_doc_col = self.column_mapping.get('SalesDocument') |
| |
| if not customer_col: |
| st.error("β No customer identifier column found") |
| raise ValueError("Cannot identify customer column") |
| |
| |
| agg_dict = {} |
| |
| if sales_doc_col: |
| agg_dict[sales_doc_col] = 'count' |
| |
| if date_col: |
| agg_dict[date_col] = ['min', 'max'] |
| |
| if not agg_dict: |
| |
| data['total_orders'] = 1 |
| data['first_order_date'] = '2024-01-01' |
| data['last_order_date'] = '2024-01-01' |
| else: |
| customer_aggs = data.groupby(customer_col).agg(agg_dict).reset_index() |
| |
| |
| new_cols = [customer_col] |
| if sales_doc_col: |
| new_cols.append('total_orders') |
| if date_col: |
| new_cols.extend(['first_order_date', 'last_order_date']) |
| |
| customer_aggs.columns = new_cols |
| |
| |
| data = data.merge(customer_aggs, on=customer_col, how='left') |
| |
| |
| rename_dict = {} |
| for standard_name, actual_name in self.column_mapping.items(): |
| if actual_name and actual_name in data.columns: |
| rename_dict[actual_name] = standard_name |
| |
| if rename_dict: |
| data = data.rename(columns=rename_dict) |
| |
| return data |
| |
| def train_model_if_needed(self): |
| """Train model with proper error handling""" |
| if self.model_exists(): |
| return self.load_existing_metadata() |
| |
| progress_bar = st.progress(0) |
| status_text = st.empty() |
| |
| try: |
| |
| status_text.text("π₯ Loading SAP SALT dataset...") |
| progress_bar.progress(20) |
| data = self.load_sap_data() |
| |
| |
| status_text.text("π§ Engineering features...") |
| progress_bar.progress(40) |
| features_data = self.engineer_features(data) |
| |
| |
| status_text.text("ποΈ Training ML model...") |
| progress_bar.progress(60) |
| metrics = self.train_model(features_data) |
| |
| |
| status_text.text("πΎ Saving model...") |
| progress_bar.progress(80) |
| self.save_model_artifacts(metrics) |
| |
| |
| progress_bar.progress(100) |
| status_text.text("β
Model training complete!") |
| |
| return metrics |
| |
| except Exception as e: |
| st.error(f"β Training failed: {str(e)}") |
| raise |
| |
| def engineer_features(self, data): |
| """Feature engineering with dynamic column handling""" |
| try: |
| |
| agg_cols = ['Customer'] |
| |
| optional_cols = ['CustomerName', 'Country', 'CustomerGroup'] |
| for col in optional_cols: |
| if col in data.columns and data[col].notna().any(): |
| agg_cols.append(col) |
| |
| |
| agg_dict = {} |
| for col in agg_cols: |
| if col != 'Customer': |
| agg_dict[col] = 'first' |
| |
| |
| if 'total_orders' in data.columns: |
| agg_dict['total_orders'] = 'first' |
| if 'first_order_date' in data.columns: |
| agg_dict['first_order_date'] = 'first' |
| if 'last_order_date' in data.columns: |
| agg_dict['last_order_date'] = 'first' |
| |
| customer_features = data.groupby('Customer').agg(agg_dict).reset_index() |
| |
| |
| reference_date = pd.to_datetime('2024-12-31') |
| |
| if 'last_order_date' in customer_features.columns: |
| customer_features['last_order_date'] = pd.to_datetime(customer_features['last_order_date'], errors='coerce') |
| customer_features['Recency'] = (reference_date - customer_features['last_order_date']).dt.days |
| else: |
| customer_features['Recency'] = 100 |
| |
| if 'first_order_date' in customer_features.columns: |
| customer_features['first_order_date'] = pd.to_datetime(customer_features['first_order_date'], errors='coerce') |
| customer_features['Tenure'] = (reference_date - customer_features['first_order_date']).dt.days |
| else: |
| customer_features['Tenure'] = 365 |
| |
| |
| customer_features['Recency'] = customer_features['Recency'].fillna(365).clip(0, 3650) |
| |
| if 'total_orders' in customer_features.columns: |
| customer_features['Frequency'] = customer_features['total_orders'].fillna(1).clip(1, 1000) |
| else: |
| customer_features['Frequency'] = 1 |
| |
| customer_features['Monetary'] = (customer_features['Frequency'] * 500).clip(100, 1000000) |
| customer_features['Tenure'] = customer_features['Tenure'].fillna(365).clip(1, 3650) |
| |
| |
| tenure_months = customer_features['Tenure'] / 30 + 1 |
| customer_features['OrderVelocity'] = (customer_features['Frequency'] / tenure_months).clip(0, 50) |
| |
| |
| self.label_encoders = {} |
| categorical_features = [] |
| |
| for col in ['Country', 'CustomerGroup']: |
| if col in customer_features.columns and customer_features[col].notna().any(): |
| try: |
| self.label_encoders[col] = LabelEncoder() |
| customer_features[f'{col}_encoded'] = self.label_encoders[col].fit_transform( |
| customer_features[col].fillna('Unknown') |
| ) |
| categorical_features.append(f'{col}_encoded') |
| except Exception as e: |
| st.warning(f"β οΈ Could not encode {col}: {str(e)}") |
| |
| |
| customer_features['IsChurned'] = ( |
| (customer_features['Recency'] > 90) & |
| (customer_features['Frequency'] > 1) |
| ).astype(int) |
| |
| |
| self.feature_columns = ['Recency', 'Frequency', 'Monetary', 'Tenure', 'OrderVelocity'] |
| self.feature_columns.extend(categorical_features) |
| |
| |
| required_cols = self.feature_columns + ['IsChurned', 'Customer'] |
| |
| |
| if 'CustomerName' in customer_features.columns: |
| required_cols.append('CustomerName') |
| |
| |
| available_cols = [col for col in required_cols if col in customer_features.columns] |
| final_data = customer_features[available_cols].copy() |
| |
| |
| for col in self.feature_columns: |
| if col in final_data.columns: |
| final_data[col] = final_data[col].replace([np.inf, -np.inf], np.nan).fillna(0) |
| final_data[col] = final_data[col].clip(-1e9, 1e9) |
| |
| st.info(f"β
Features engineered: {self.feature_columns}") |
| st.info(f"π Final dataset shape: {final_data.shape}") |
| |
| return final_data |
| |
| except Exception as e: |
| st.error(f"Feature engineering failed: {str(e)}") |
| st.info(f"Available columns: {list(data.columns)}") |
| raise |
| |
| def train_model(self, data): |
| """Train model with additional validation""" |
| try: |
| |
| missing_features = [col for col in self.feature_columns if col not in data.columns] |
| if missing_features: |
| st.warning(f"β οΈ Missing features: {missing_features}") |
| |
| self.feature_columns = [col for col in self.feature_columns if col in data.columns] |
| |
| if not self.feature_columns: |
| raise ValueError("No valid features available for training") |
| |
| X = data[self.feature_columns].copy() |
| y = data['IsChurned'].copy() |
| |
| |
| if not np.isfinite(X).all().all(): |
| X = X.replace([np.inf, -np.inf], np.nan).fillna(0) |
| |
| |
| if len(X) < 50: |
| raise ValueError(f"Insufficient training data: {len(X)} samples") |
| |
| if y.nunique() < 2: |
| st.warning("β οΈ Creating artificial target variation for training...") |
| |
| variation_size = len(y) // 4 |
| y.iloc[:variation_size] = 1 - y.iloc[:variation_size] |
| |
| |
| X_train, X_test, y_train, y_test = train_test_split( |
| X, y, test_size=0.2, random_state=42, |
| stratify=y if y.nunique() > 1 else None |
| ) |
| |
| |
| self.model = RandomForestClassifier( |
| n_estimators=50, |
| max_depth=8, |
| min_samples_split=20, |
| min_samples_leaf=10, |
| class_weight='balanced', |
| random_state=42, |
| n_jobs=1 |
| ) |
| |
| self.model.fit(X_train, y_train) |
| |
| |
| train_score = self.model.score(X_train, y_train) |
| test_score = self.model.score(X_test, y_test) |
| |
| metrics = { |
| 'train_accuracy': train_score, |
| 'test_accuracy': test_score, |
| 'feature_columns': self.feature_columns, |
| 'training_samples': len(X_train), |
| 'test_samples': len(X_test), |
| 'churn_rate': float(y.mean()), |
| 'feature_importance': dict(zip(self.feature_columns, self.model.feature_importances_)), |
| 'column_mapping': self.column_mapping |
| } |
| |
| st.success(f"β
Model trained successfully! Accuracy: {test_score:.3f}") |
| |
| return metrics |
| |
| except Exception as e: |
| st.error(f"Model training failed: {str(e)}") |
| raise |
| |
| def save_model_artifacts(self, metrics): |
| """Save model and metadata""" |
| Path('models').mkdir(exist_ok=True) |
| |
| model_data = { |
| 'model': self.model, |
| 'label_encoders': self.label_encoders, |
| 'feature_columns': self.feature_columns, |
| 'column_mapping': self.column_mapping, |
| 'version': 'v1', |
| 'training_date': datetime.now().isoformat() |
| } |
| |
| joblib.dump(model_data, self.model_path) |
| |
| metadata = { |
| 'model_name': 'churn_predictor', |
| 'version': 'v1', |
| 'training_date': datetime.now().isoformat(), |
| 'metrics': metrics, |
| 'status': 'trained', |
| 'data_source': 'SAP/SALT dataset from Hugging Face', |
| 'column_mapping': self.column_mapping |
| } |
| |
| with open(self.metadata_path, 'w') as f: |
| json.dump(metadata, f, indent=2) |
| |
| def load_existing_metadata(self): |
| """Load existing model metadata""" |
| try: |
| with open(self.metadata_path, 'r') as f: |
| return json.load(f) |
| except Exception: |
| return None |
|
|