text
stringlengths
1
93.6k
# --- Setup and data acquisition ------------------------------------------
# Optionally silence library warnings before any noisy calls run.
if suppress_warnings:
    suppress_warnings_method()

print(f"Starting analysis for {symbol}...")

# Download the raw price history and enrich it with derived indicators.
print(f"Fetching stock data for {symbol}...")
data = fetch_stock_data(symbol, start_date, end_date)

# NOTE: was an f-string with no placeholders (F541); plain literal emits
# the identical text.
print("Adding technical indicators...")
data = add_technical_indicators(data)
# Indicator warm-up windows (e.g. long SMAs) leave leading NaNs; drop them
# so every remaining row has a full feature vector.
data.dropna(inplace=True)

# Quick-test mode trims to the most recent rows to shorten the run.
if quick_test:
    print("Quick test mode: Using only the last 100 data points.")
    data = data.tail(100)

print("Preparing data for model training...")
# Feature columns fed to every model: raw price/volume plus the derived
# technical indicators added upstream.
features = (
    "Close Volume SMA_20 SMA_50 RSI MACD "
    "BB_upper BB_middle BB_lower "
    "Volatility Price_Change Volume_Change High_Low_Range"
).split()

# Scale the selected columns and window them into supervised (X, y) pairs.
X, y, scaler = prepare_data(data[features])

# Grow the training set with synthetic variants, appended after the
# original samples.
print("Augmenting data...")
X_aug, y_aug = augment_data(X, y)
X = np.concatenate((X, X_aug), axis=0)
y = np.concatenate((y, y_aug), axis=0)

# Chronological split (no shuffling) so the test set is strictly later
# than the training set.
print("Splitting data into training and testing sets...")
X_train, X_test, y_train, y_test = time_based_train_test_split(X, y, test_size=0.2)
print("\nStarting model training and hyperparameter tuning...")

# Lazy builders: each factory is invoked only when its model was requested,
# preserving the original LSTM -> GRU -> RF -> XGBoost order.
_model_builders = [
    ("LSTM", lambda: create_lstm_model((X.shape[1], X.shape[2]))),
    ("GRU", lambda: create_gru_model((X.shape[1], X.shape[2]))),
    ("Random Forest", lambda: tune_random_forest(X, y, quick_test)),
    ("XGBoost", lambda: tune_xgboost(X, y, quick_test)),
]
models = [(name, build()) for name, build in _model_builders if name in models_to_run]

# Accumulators filled by the training loop below.
results = {}          # model name -> fitted estimator
oof_predictions = {}  # model name -> out-of-fold predictions
model_stats = []      # per-model summary rows
# --- Cross-validate, then refit each selected model on the full dataset ---
# NOTE(review): indentation was lost in this chunk and the final
# model_stats.append(...) dict continues past the visible lines; code is
# left byte-identical.
with tqdm(total=len(models), desc="Training Models", position=0) as pbar:
for name, model in models:
print(f"\nTraining and evaluating {name} model...")
# Time-series CV: fewer splits in quick-test mode to save time.
cv_score, cv_std, overall_score, oof_pred = train_and_evaluate_model(
model, X, y, n_splits=3 if quick_test else 5, model_name=name
)
print(f"  {name} model results:")
print(f"    Cross-validation R² score: {cv_score:.4f} (±{cv_std:.4f})")
print(f"    Overall out-of-fold R² score: {overall_score:.4f}")
print(f"Retraining {name} model on full dataset...")
# Tree ensembles expect 2-D input, so the windowed 3-D tensor is
# flattened to (samples, features); their .score() is R² directly.
if isinstance(model, (RandomForestRegressor, XGBRegressor)):
model.fit(X.reshape(X.shape[0], -1), y)
train_score = model.score(X.reshape(X.shape[0], -1), y)
else:
# Keras path: per-epoch progress via a nested tqdm bar driven by a
# callback closure over epoch_pbar.
with tqdm(total=100, desc="Epochs", leave=False) as epoch_pbar:
class EpochProgressCallback(Callback):
def on_epoch_end(self, epoch, logs=None):
epoch_pbar.update(1)
history = model.fit(
X,
y,
epochs=100,
batch_size=32,
verbose=0,
callbacks=[EpochProgressCallback()],
)
# NOTE(review): 1 - final training loss is NOT R²; it is only a rough
# proxy, so Train vs OOF comparisons for the neural nets are heuristic.
train_score = (
1 - history.history["loss"][-1]
)  # Use final training loss as a proxy for R²
results[name] = model
oof_predictions[name] = oof_pred
# Positive gap between train and out-of-fold score suggests overfitting.
overfitting_score = train_score - overall_score
model_stats.append(
{
"Model": name,
"CV R² Score": cv_score,
"CV R² Std": cv_std,
"OOF R² Score": overall_score,
"Train R² Score": train_score,