#!/usr/bin/env bash
# Runner script for GNN complexity prediction experiments.
# Outputs METRICS:{json} for Ratiocinator fleet parsing.
#
# Environment variables (set by Ratiocinator fleet):
# CONV_TYPE - GNN convolution type: GCN, SAGE, GAT, GIN, GraphConv (default: SAGE)
# HIDDEN_DIM - Hidden dimension (default: 64)
# NUM_LAYERS - Number of GNN layers (default: 3)
# DROPOUT - Dropout rate (default: 0.1)
# LEARNING_RATE - Learning rate (default: 0.001)
# EPOCHS - Training epochs (default: 50)
# BATCH_SIZE - Batch size (default: 32)
# DATASET_PATH - Path to dataset dir (default: dataset/)
# Fail on unset variables and broken pipelines.  Deliberately no -e:
# failures below are handled explicitly so a METRICS error line can be emitted.
set -uo pipefail

# Hyperparameters arrive via environment (set by the Ratiocinator fleet);
# fill in defaults for any that are absent or empty.
: "${CONV_TYPE:=SAGE}"
: "${HIDDEN_DIM:=64}"
: "${NUM_LAYERS:=3}"
: "${DROPOUT:=0.1}"
: "${LEARNING_RATE:=0.001}"
: "${EPOCHS:=50}"
: "${BATCH_SIZE:=32}"
: "${DATASET_PATH:=dataset/}"
OUTPUT_PATH="models/experiment_model.pt"

# Log the effective configuration so fleet logs show what this arm ran with.
printf '%s\n' "=== GNN Complexity Arm ==="
printf '%s\n' "CONV_TYPE=$CONV_TYPE HIDDEN_DIM=$HIDDEN_DIM NUM_LAYERS=$NUM_LAYERS"
printf '%s\n' "DROPOUT=$DROPOUT LR=$LEARNING_RATE EPOCHS=$EPOCHS BATCH=$BATCH_SIZE"
# Pull LFS files if they are pointers (e.g., after shallow clone)
if command -v git-lfs &>/dev/null || git lfs version &>/dev/null 2>&1; then
echo "Pulling LFS files..."
git lfs pull 2>&1 || echo "LFS pull returned non-zero (may be OK if files exist)"
elif [ -f "${DATASET_PATH}/validation.jsonl" ] && head -1 "${DATASET_PATH}/validation.jsonl" | grep -q "^version https://git-lfs"; then
echo "ERROR: LFS pointer files detected but git-lfs not installed"
echo "Install with: apt-get install -y git-lfs && git lfs pull"
exit 1
fi
# Ensure a train/val split exists; derive it from the full validation file.
if [ ! -f "${DATASET_PATH}/train.jsonl" ]; then
  echo "Creating train/val split..."
  # Abort early on failure: without this check (the script does not use -e)
  # train.py would run on missing data and fail with a less obvious error.
  python scripts/split_complexity_data.py \
    --input "${DATASET_PATH}/validation.jsonl" \
    --output-dir "${DATASET_PATH}" || {
      echo "ERROR: train/val split failed"
      echo "METRICS:{\"error\": \"split_failed\"}"
      exit 1
    }
fi
# Copy val.jsonl to validation_split.jsonl if train.py expects that name
# (a real copy, not a symlink, so later renames of the original are safe).
if [ -f "${DATASET_PATH}/val.jsonl" ] && [ ! -f "${DATASET_PATH}/validation_split.jsonl" ]; then
  cp "${DATASET_PATH}/val.jsonl" "${DATASET_PATH}/validation_split.jsonl"
fi
# Model artifacts land here.
mkdir -p models

# Point validation.jsonl at val.jsonl so consumers expecting either name see
# the split data; archive the original full file first so it is not lost.
if [ -f "${DATASET_PATH}/val.jsonl" ]; then
  full_val="${DATASET_PATH}/validation.jsonl"
  # Only archive a regular file; an existing symlink is simply re-pointed
  # by the ln -sf below (keeps re-runs idempotent).
  if [ -f "$full_val" ] && ! [ -L "$full_val" ]; then
    mv "$full_val" "${DATASET_PATH}/validation_full.jsonl"
  fi
  ln -sf val.jsonl "${DATASET_PATH}/validation.jsonl"
fi
# Run training, teeing output to a log: it streams live to the fleet console
# AND is captured so the best-loss line can be scraped from it afterwards.
# mktemp (not a predictable $$-based name) avoids /tmp collisions/clobbering.
TRAIN_LOG="$(mktemp /tmp/train_output_XXXXXX.log)" || exit 1
python train.py \
  --dataset_path "$DATASET_PATH" \
  --epochs "$EPOCHS" \
  --output_path "$OUTPUT_PATH" \
  --batch_size "$BATCH_SIZE" \
  --learning_rate "$LEARNING_RATE" \
  --hidden_dim "$HIDDEN_DIM" \
  --num_layers "$NUM_LAYERS" \
  --conv_type "$CONV_TYPE" \
  --dropout "$DROPOUT" \
  --num_workers 0 \
  2>&1 | tee "$TRAIN_LOG"
# tee exits 0 even when train.py fails; take the first pipeline stage's status.
TRAIN_RC=${PIPESTATUS[0]}
if [ "$TRAIN_RC" -ne 0 ]; then
  echo "ERROR: train.py exited with code $TRAIN_RC"
  echo "METRICS:{\"error\": \"training_failed\", \"exit_code\": $TRAIN_RC}"
  exit 1
fi
# Extract the best validation loss from the training log.  The number is
# anchored to its label: the old pattern '[\d.]+' matched ANY digit/dot run
# on the line — including a bare "..." — which later crashed float() in the
# evaluation step.  Emits nothing when the label is absent.
extract_best_val_loss() {
  grep -oP 'Best validation loss\D*\K[0-9]+(?:\.[0-9]+)?' -- "$1" | tail -1
}
BEST_VAL_LOSS=$(extract_best_val_loss "$TRAIN_LOG" 2>/dev/null)
# Evaluate the trained checkpoint on the validation set and emit the
# METRICS:{json} line the Ratiocinator fleet parses.  Shell variables are
# interpolated directly into the Python source below — keep them numeric/simple.
python -c "
import sys, os, json, torch
sys.path.insert(0, 'src')
from data_processing import create_data_loaders
from models import RubyComplexityGNN
import numpy as np

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
checkpoint = torch.load('$OUTPUT_PATH', map_location=device, weights_only=False)
config = checkpoint['model_config']
model = RubyComplexityGNN(
    input_dim=config.get('input_dim', 74),
    hidden_dim=config.get('hidden_dim', 64),
    num_layers=config.get('num_layers', 3),
    conv_type=config.get('conv_type', 'SAGE'),
    dropout=config.get('dropout', 0.1)
).to(device)
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()

val_path = os.path.join('${DATASET_PATH}', 'val.jsonl')
if not os.path.exists(val_path):
    val_path = os.path.join('${DATASET_PATH}', 'validation.jsonl')
_, val_loader = create_data_loaders(val_path, val_path, batch_size=64, shuffle=False, num_workers=0)

all_preds, all_targets = [], []
with torch.no_grad():
    for batch in val_loader:
        batch = batch.to(device)
        # reshape(-1), NOT squeeze(): squeeze() on a final batch containing a
        # single graph yields a 0-dim tensor whose .tolist() returns a plain
        # float, and list.extend(float) raises TypeError.
        preds = model(batch).reshape(-1)
        all_preds.extend(preds.cpu().numpy().tolist())
        all_targets.extend(batch.y.cpu().numpy().tolist())

preds = np.array(all_preds)
targets = np.array(all_targets)
mae = float(np.mean(np.abs(preds - targets)))
mse = float(np.mean((preds - targets) ** 2))
# Guard the R^2 denominator: constant targets would divide by zero and make
# json.dumps emit invalid bare 'NaN'.
ss_tot = float(np.sum((targets - np.mean(targets)) ** 2))
r2 = float(1 - np.sum((targets - preds) ** 2) / ss_tot) if ss_tot > 0 else 0.0
print('METRICS:' + json.dumps({
    'val_mae': round(mae, 4),
    'val_mse': round(mse, 4),
    'val_r2': round(r2, 4),
    'best_val_loss': round(float('${BEST_VAL_LOSS:-0}'), 4),
    'conv_type': '$CONV_TYPE',
    'hidden_dim': $HIDDEN_DIM,
    'num_layers': $NUM_LAYERS,
    'dropout': $DROPOUT,
    'learning_rate': $LEARNING_RATE,
    'epochs': $EPOCHS
}))
" 2>&1
EVAL_RC=$?
rm -f "$TRAIN_LOG"
# Surface evaluation failures explicitly; without this check the script
# would exit 0 with no METRICS line and the fleet would record an empty result.
if [ "$EVAL_RC" -ne 0 ]; then
  echo "METRICS:{\"error\": \"evaluation_failed\", \"exit_code\": $EVAL_RC}"
  exit 1
fi