timlawrenz committed on
Commit
1fc5206
·
verified ·
1 Parent(s): 92ed8cd

Upload scripts/run_topology_arm.sh with huggingface_hub

Browse files
Files changed (1) hide show
  1. scripts/run_topology_arm.sh +170 -0
scripts/run_topology_arm.sh ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/usr/bin/env bash
# Runner script for Track 4: Decoder Topology experiments.
# Tests tree-aware decoding (teacher_forced / iterative) vs chain baseline.
# Outputs METRICS:{json} for Ratiocinator fleet parsing.
#
# Environment variables (set by Ratiocinator fleet):
#   DECODER_EDGE_MODE - chain, teacher_forced, iterative (default: teacher_forced)
#   DECODER_CONV_TYPE - GCN, SAGE, GAT, GIN, GraphConv (default: GAT)
#   HIDDEN_DIM        - Hidden dimension (default: 256)
#   NUM_LAYERS        - Number of decoder layers (default: 5)
#   LEARNING_RATE     - Learning rate (default: 0.001)
#   TYPE_WEIGHT       - Weight for node type loss (default: 2.0)
#   PARENT_WEIGHT     - Weight for parent prediction loss (default: 1.0)
#   LOSS_FN           - Loss function: improved, comprehensive, simple, original (default: improved)
#   EPOCHS            - Training epochs (default: 30)
#   DATASET_PATH      - Path to dataset dir (default: dataset/)

# NOTE: -e is intentionally omitted — failures of individual steps are handled
# explicitly (see TRAIN_RC below) so a METRICS line can still be emitted.
set -uo pipefail

DECODER_EDGE_MODE="${DECODER_EDGE_MODE:-teacher_forced}"
DECODER_CONV_TYPE="${DECODER_CONV_TYPE:-GAT}"
HIDDEN_DIM="${HIDDEN_DIM:-256}"
NUM_LAYERS="${NUM_LAYERS:-5}"
LEARNING_RATE="${LEARNING_RATE:-0.001}"
TYPE_WEIGHT="${TYPE_WEIGHT:-2.0}"
PARENT_WEIGHT="${PARENT_WEIGHT:-1.0}"
LOSS_FN="${LOSS_FN:-improved}"
EPOCHS="${EPOCHS:-30}"
DATASET_PATH="${DATASET_PATH:-dataset/}"
OUTPUT_PATH="models/experiment_topology_decoder.pt"
ENCODER_PATH="models/best_model.pt"

echo "=== Track 4: Decoder Topology Arm ==="
echo "EDGE_MODE=$DECODER_EDGE_MODE DECODER=$DECODER_CONV_TYPE HIDDEN=$HIDDEN_DIM LAYERS=$NUM_LAYERS"
echo "LR=$LEARNING_RATE TYPE_W=$TYPE_WEIGHT PARENT_W=$PARENT_WEIGHT LOSS=$LOSS_FN"

# Pull LFS files if they are pointers (e.g., after shallow clone).
# (Fixed: the probe previously had a redundant duplicated redirect
# `&>/dev/null 2>&1`; `&>` already covers both streams.)
if command -v git-lfs &>/dev/null || git lfs version &>/dev/null; then
  echo "Pulling LFS files..."
  git lfs pull 2>&1 || echo "LFS pull returned non-zero (may be OK if files exist)"
elif [ -f "${DATASET_PATH}/validation.jsonl" ] && head -1 "${DATASET_PATH}/validation.jsonl" | grep -q "^version https://git-lfs"; then
  echo "ERROR: LFS pointer files detected but git-lfs not installed"
  exit 1
fi

# Ensure train/val split exists
if [ ! -f "${DATASET_PATH}/train.jsonl" ]; then
  echo "Creating train/val split..."
  python scripts/split_complexity_data.py \
    --input "${DATASET_PATH}/validation.jsonl" \
    --output-dir "${DATASET_PATH}"
fi

# Symlink validation.jsonl → val.jsonl for compatibility; the original full
# validation set is preserved as validation_full.jsonl first (only if it is a
# real file, not an already-created symlink — keeps re-runs idempotent).
if [ -f "${DATASET_PATH}/val.jsonl" ]; then
  ORIG_VAL="${DATASET_PATH}/validation.jsonl"
  if [ -f "$ORIG_VAL" ] && ! [ -L "$ORIG_VAL" ]; then
    mv "$ORIG_VAL" "${DATASET_PATH}/validation_full.jsonl"
  fi
  ln -sf val.jsonl "${DATASET_PATH}/validation.jsonl"
fi

# BUGFIX: models/ must exist BEFORE encoder training writes $ENCODER_PATH.
# Previously `mkdir -p models` only ran after this step, so a fresh checkout
# without a models/ directory would fail on the encoder-training fallback.
mkdir -p models

# Need pre-trained encoder
if [ ! -f "$ENCODER_PATH" ]; then
  echo "Training encoder first..."
  python train.py --epochs 20 --output_path "$ENCODER_PATH" --dataset_path "$DATASET_PATH" --num_workers 0
fi

# Run autoencoder training with tree-aware decoder — stream output.
# mktemp avoids the predictable /tmp/topo_train_$$.log name; the EXIT trap
# removes the log on every exit path (the original leaked it on the early
# training-failure exit).
TRAIN_LOG="$(mktemp /tmp/topo_train_XXXXXX)"
trap 'rm -f -- "$TRAIN_LOG"' EXIT
python train_autoencoder.py \
  --dataset_path "$DATASET_PATH" \
  --epochs "$EPOCHS" \
  --output_path "$OUTPUT_PATH" \
  --encoder_weights_path "$ENCODER_PATH" \
  --hidden_dim "$HIDDEN_DIM" \
  --num_layers "$NUM_LAYERS" \
  --decoder_conv_type "$DECODER_CONV_TYPE" \
  --decoder_edge_mode "$DECODER_EDGE_MODE" \
  --learning_rate "$LEARNING_RATE" \
  --type_weight "$TYPE_WEIGHT" \
  --parent_weight "$PARENT_WEIGHT" \
  --loss_fn "$LOSS_FN" \
  2>&1 | tee "$TRAIN_LOG"

# PIPESTATUS[0] is the trainer's exit code (tee would otherwise mask it).
TRAIN_RC=${PIPESTATUS[0]}
if [ "$TRAIN_RC" -ne 0 ]; then
  echo "ERROR: train_autoencoder.py exited with code $TRAIN_RC"
  echo "METRICS:{\"error\": \"training_failed\", \"exit_code\": $TRAIN_RC}"
  exit 1
fi

# Last numeric token on the last "Best validation loss" log line; empty when
# absent — the Python below falls back to 0 via ${BEST_VAL_LOSS:-0}.
# NOTE(review): grep -oP needs GNU grep (PCRE) — fine on the Linux fleet.
BEST_VAL_LOSS=$(grep "Best validation loss" "$TRAIN_LOG" | grep -oP '[\d.]+' | tail -1)

# Run syntactic validity evaluation.
# NOTE(review): shell values are interpolated directly into the Python source.
# They come from the fleet controller, but a malformed value would be a code
# injection — consider passing via os.environ in a follow-up.
python -c "
import sys, os, json, torch
sys.path.insert(0, os.path.join(os.path.dirname('.'), 'src'))
from models import ASTAutoencoder
from data_processing import create_data_loaders

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
num_samples = 100
valid_count = 0
total = 0

try:
    model = ASTAutoencoder(
        encoder_input_dim=74,
        node_output_dim=74,
        hidden_dim=$HIDDEN_DIM,
        num_layers=$NUM_LAYERS,
        conv_type='SAGE',
        freeze_encoder=True,
        encoder_weights_path='$ENCODER_PATH',
        decoder_conv_type='$DECODER_CONV_TYPE',
        decoder_edge_mode='$DECODER_EDGE_MODE',
    ).to(device)

    checkpoint = torch.load('$OUTPUT_PATH', map_location=device, weights_only=False)
    model.decoder.load_state_dict(checkpoint['decoder_state_dict'])
    model.eval()

    # Load val data
    val_path = os.path.join('${DATASET_PATH}', 'val.jsonl')
    if not os.path.exists(val_path):
        val_path = os.path.join('${DATASET_PATH}', 'validation.jsonl')
    _, val_loader = create_data_loaders(val_path, val_path, batch_size=1, shuffle=False, num_workers=0)

    with torch.no_grad():
        for batch in val_loader:
            if total >= num_samples:
                break
            batch = batch.to(device)
            result = model(batch)
            recon = result['reconstruction']
            node_feats = recon.get('node_features') if isinstance(recon, dict) else None
            if node_feats is not None:
                pred_types = node_feats.argmax(dim=-1)
                unique_types = len(pred_types.unique())
                if unique_types > 2:
                    valid_count += 1
            total += 1

    validity_pct = (valid_count / total * 100) if total > 0 else 0.0
except Exception as e:
    validity_pct = 0.0
    total = num_samples
    print(f'Eval error: {e}', file=sys.stderr)

print('METRICS:' + json.dumps({
    'syntactic_validity_pct': round(validity_pct, 2),
    'val_loss': round(float('${BEST_VAL_LOSS:-0}'), 4),
    'samples_evaluated': total,
    'valid_samples': valid_count,
    'decoder_edge_mode': '$DECODER_EDGE_MODE',
    'decoder_conv_type': '$DECODER_CONV_TYPE',
    'hidden_dim': $HIDDEN_DIM,
    'num_layers': $NUM_LAYERS,
    'loss_fn': '$LOSS_FN',
    'type_weight': $TYPE_WEIGHT,
    'parent_weight': $PARENT_WEIGHT,
    'learning_rate': $LEARNING_RATE,
    'epochs': $EPOCHS,
}))
" 2>&1