cesarali committed on
Commit
efc37c1
·
verified ·
1 Parent(s): cc5fdda

best val_rmse 0.0546

Browse files
Files changed (2) hide show
  1. config.json +31 -19
  2. pytorch_model.bin +2 -2
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "best_val_loss": 0.010676275007426739,
3
  "comet_ai_key": null,
4
  "context_observations": {
5
  "add_rem": true,
@@ -14,11 +14,23 @@
14
  },
15
  "debug_test": true,
16
  "dosing": {
17
- "dose": 1.0,
18
- "route": "oral",
 
 
 
 
 
 
 
 
 
 
 
 
19
  "time": 0.0
20
  },
21
- "experiment_dir": "/home/cesarali/Pharma/generative_pk/results/comet/node_pk_compartments/9644281f488f4122a2fbf3ee4057e0cb",
22
  "experiment_indentifier": null,
23
  "experiment_name": "node_pk_compartments",
24
  "hf_model_card_path": [
@@ -167,42 +179,42 @@
167
  "name_str": "NeuralProcessPK",
168
  "network": {
169
  "activation": "ReLU",
170
- "aggregator_num_heads": 2,
171
  "aggregator_type": "attention",
172
  "combine_latent_mode": "mlp",
173
  "cov_proj_dim": 16,
174
  "decoder_attention_layers": 2,
175
- "decoder_hidden_dim": 128,
176
  "decoder_name": "TransformerDecoder",
177
- "decoder_num_layers": 2,
178
- "decoder_rnn_hidden_dim": 200,
179
  "drift_activation": "Tanh",
180
- "drift_num_layers": 3,
181
  "dropout": 0.1,
182
- "encoder_rnn_hidden_dim": 200,
183
  "exclusive_node_step": false,
184
  "individual_encoder_name": "RNNContextEncoder",
185
  "individual_encoder_number_of_heads": 4,
186
- "init_hidden_num_layers": 2,
187
  "input_encoding_hidden_dim": 128,
188
- "loss_name": "log_nll",
189
  "node_step": true,
190
  "norm": "layer",
191
- "output_head_num_layers": 2,
192
- "prediction_latent_deterministic": true,
193
  "prediction_only": false,
194
  "rnn_decoder_number_of_layers": 4,
195
- "rnn_individual_encoder_number_of_layers": 2,
196
- "study_latent_deterministic": true,
197
- "time_obs_encoder_hidden_dim": 128,
198
- "time_obs_encoder_output_dim": 128,
199
  "use_attention": true,
200
  "use_invariance_loss": true,
201
  "use_kl_i": true,
202
  "use_kl_i_np": true,
203
  "use_kl_init": true,
204
  "use_kl_s": true,
205
- "zi_latent_dim": 200
206
  },
207
  "run_index": 0,
208
  "tags": [
 
1
  {
2
+ "best_val_loss": 0.05456165224313736,
3
  "comet_ai_key": null,
4
  "context_observations": {
5
  "add_rem": true,
 
14
  },
15
  "debug_test": true,
16
  "dosing": {
17
+ "logdose_mean_range": [
18
+ -2.0,
19
+ 2.0
20
+ ],
21
+ "logdose_std_range": [
22
+ 0.1,
23
+ 0.5
24
+ ],
25
+ "num_individuals": 10,
26
+ "route_options": [
27
+ "oral",
28
+ "iv"
29
+ ],
30
+ "same_route": true,
31
  "time": 0.0
32
  },
33
+ "experiment_dir": "/home/cesarali/Pharma/generative_pk/results/comet/node_pk_compartments/ac1495db69aa4ea68f6891e0bb9510cb",
34
  "experiment_indentifier": null,
35
  "experiment_name": "node_pk_compartments",
36
  "hf_model_card_path": [
 
179
  "name_str": "NeuralProcessPK",
180
  "network": {
181
  "activation": "ReLU",
182
+ "aggregator_num_heads": 8,
183
  "aggregator_type": "attention",
184
  "combine_latent_mode": "mlp",
185
  "cov_proj_dim": 16,
186
  "decoder_attention_layers": 2,
187
+ "decoder_hidden_dim": 512,
188
  "decoder_name": "TransformerDecoder",
189
+ "decoder_num_layers": 4,
190
+ "decoder_rnn_hidden_dim": 256,
191
  "drift_activation": "Tanh",
192
+ "drift_num_layers": 2,
193
  "dropout": 0.1,
194
+ "encoder_rnn_hidden_dim": 256,
195
  "exclusive_node_step": false,
196
  "individual_encoder_name": "RNNContextEncoder",
197
  "individual_encoder_number_of_heads": 4,
198
+ "init_hidden_num_layers": 4,
199
  "input_encoding_hidden_dim": 128,
200
+ "loss_name": "nll",
201
  "node_step": true,
202
  "norm": "layer",
203
+ "output_head_num_layers": 3,
204
+ "prediction_latent_deterministic": false,
205
  "prediction_only": false,
206
  "rnn_decoder_number_of_layers": 4,
207
+ "rnn_individual_encoder_number_of_layers": 4,
208
+ "study_latent_deterministic": false,
209
+ "time_obs_encoder_hidden_dim": 256,
210
+ "time_obs_encoder_output_dim": 256,
211
  "use_attention": true,
212
  "use_invariance_loss": true,
213
  "use_kl_i": true,
214
  "use_kl_i_np": true,
215
  "use_kl_init": true,
216
  "use_kl_s": true,
217
+ "zi_latent_dim": 256
218
  },
219
  "run_index": 0,
220
  "tags": [
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:af3774445653c33649c00e1393bc0bba330f0f0bf8dfe141b40695de21fd8c5d
3
- size 6044367
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:129d38d12fc081915170db87990130fda18640e5db789d00d303d9667e7d9ad2
3
+ size 38753907