timlawrenz committed on
Commit
41f7984
·
verified ·
1 Parent(s): 36315b1

Upload experiments/gnn_generation_analysis.yaml with huggingface_hub

Browse files
experiments/gnn_generation_analysis.yaml ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
---
# Fleet Spec: GNN Code Generation Failure Analysis (Track 2)
#
# Systematic analysis of code generation quality across decoder configurations.
# Tests: loss functions × decoder architectures × hidden dimensions.
#
# Launch:
#   ratiocinator fleet run experiments/gnn_generation_analysis.yaml

name: gnn-generation-analysis
description: "Systematic GNN code generation failure analysis"

# Machine requirements for every arm in the fleet.
hardware:
  gpu: "RTX 4090"
  num_gpus: 1
  min_cpu_ram_gb: 32
  min_inet_down: 1000.0
  # NOTE(review): parsed as float 12.0 by implicit typing; quote as "12.0"
  # if the consumer expects a version *string* rather than a number.
  min_cuda_version: 12.0
  max_dph: 0.40       # max dollars per hour per instance
  disk_gb: 50.0
  image: pytorch/pytorch:2.7.0-cuda12.8-cudnn9-runtime

# Experiment code checked out on each worker.
repo:
  url: https://github.com/timlawrenz/jubilant-palm-tree.git
  branch: experiment/ratiocinator-gnn-study
  clone_depth: 1

# No external dataset download; data ships with the repo (via git-lfs below).
data:
  source: none

deps:
  pre_install:
    - "apt-get update -qq && apt-get install -y -qq git-lfs > /dev/null 2>&1 || true"
    - "cd /workspace/experiment && git lfs install && git lfs pull"
    - "pip install torch-geometric torch-scatter torch-sparse -f https://data.pyg.org/whl/torch-2.7.0+cu128.html"
    - "pip install pandas tqdm sentence-transformers nltk scikit-learn numpy"
  requirements: requirements.txt
  # Skip these in requirements.txt: torch ships with the image, and the
  # PyG stack is pinned via the pre_install wheel index above.
  exclude_from_requirements:
    - torch
    - torchvision
    - torch_geometric
  verify: "python -c \"import torch_geometric; print(f'PyG {torch_geometric.__version__}')\""

# One arm per (loss function × decoder architecture × hidden dim) cell.
# All arms run the same entry script; configuration is via env vars.
arms:
  # ── Loss function comparison (GAT decoder baseline) ──
  - name: improved-loss-gat
    description: "Improved (cross-entropy) loss, GAT decoder"
    command: "bash scripts/run_generation_arm.sh"
    env:
      DECODER_CONV_TYPE: "GAT"
      LOSS_FN: "improved"
      HIDDEN_DIM: "256"
      NUM_LAYERS: "5"
      EPOCHS: "30"

  - name: simple-loss-gat
    description: "Simple (MSE) loss, GAT decoder"
    command: "bash scripts/run_generation_arm.sh"
    env:
      DECODER_CONV_TYPE: "GAT"
      LOSS_FN: "simple"
      HIDDEN_DIM: "256"
      NUM_LAYERS: "5"
      EPOCHS: "30"

  - name: comprehensive-loss-gat
    description: "Comprehensive (combined) loss, GAT decoder"
    command: "bash scripts/run_generation_arm.sh"
    env:
      DECODER_CONV_TYPE: "GAT"
      LOSS_FN: "comprehensive"
      HIDDEN_DIM: "256"
      NUM_LAYERS: "5"
      EPOCHS: "30"

  # ── Decoder architecture comparison (improved loss) ──
  - name: improved-loss-sage
    description: "Improved loss, SAGE decoder"
    command: "bash scripts/run_generation_arm.sh"
    env:
      DECODER_CONV_TYPE: "SAGE"
      LOSS_FN: "improved"
      HIDDEN_DIM: "256"
      NUM_LAYERS: "5"
      EPOCHS: "30"

  - name: improved-loss-gin
    description: "Improved loss, GIN decoder"
    command: "bash scripts/run_generation_arm.sh"
    env:
      DECODER_CONV_TYPE: "GIN"
      LOSS_FN: "improved"
      HIDDEN_DIM: "256"
      NUM_LAYERS: "5"
      EPOCHS: "30"

  - name: improved-loss-gcn
    description: "Improved loss, GCN decoder"
    command: "bash scripts/run_generation_arm.sh"
    env:
      DECODER_CONV_TYPE: "GCN"
      LOSS_FN: "improved"
      HIDDEN_DIM: "256"
      NUM_LAYERS: "5"
      EPOCHS: "30"

  # ── Wider decoder ──
  - name: improved-loss-gat-wide
    description: "Improved loss, GAT decoder, hidden_dim=512"
    command: "bash scripts/run_generation_arm.sh"
    env:
      DECODER_CONV_TYPE: "GAT"
      LOSS_FN: "improved"
      HIDDEN_DIM: "512"
      NUM_LAYERS: "5"
      EPOCHS: "30"

# Arms report metrics as JSON lines prefixed with "METRICS:" on stdout.
metrics:
  protocol: json_line
  json_prefix: "METRICS:"

# Fleet-wide spend and timeout caps.
budget:
  max_dollars: 10.00
  train_timeout_s: 3600
  download_timeout_s: 600