{
  "alpha_decoder": {
    "hidden_act": {
      "name": "torch.nn.GELU"
    },
    "hidden_layers": [
      256,
      256
    ],
    "name": "fim.models.blocks.base.MLP"
  },
  "architectures": [
    "FIMHawkes"
  ],
  "auto_map": {
    "AutoConfig": "configuration_hawkes.FIMHawkesConfig",
    "AutoModel": "modeling_hawkes.FIMHawkes"
  },
  "beta_decoder": {
    "hidden_act": {
      "name": "torch.nn.GELU"
    },
    "hidden_layers": [
      256,
      256
    ],
    "name": "fim.models.blocks.base.MLP"
  },
  "context_summary_encoder": {
    "encoder_layer": {
      "batch_first": true,
      "dropout": 0.0,
      "name": "torch.nn.TransformerEncoderLayer",
      "nhead": 4
    },
    "name": "torch.nn.TransformerEncoder",
    "num_layers": 2
  },
  "context_summary_pooling": {
    "attention": {
      "nhead": 4
    },
    "name": "fim.models.blocks.neural_operators.AttentionOperator",
    "num_res_layers": 1,
    "paths_block_attention": false
  },
  "context_ts_encoder": {
    "encoder_layer": {
      "batch_first": true,
      "dropout": 0.0,
      "name": "torch.nn.TransformerEncoderLayer",
      "nhead": 4
    },
    "name": "torch.nn.TransformerEncoder",
    "num_layers": 4
  },
  "decoder_ts": {
    "decoder_layer": {
      "batch_first": true,
      "dropout": 0.0,
      "name": "torch.nn.TransformerDecoderLayer",
      "nhead": 4
    },
    "name": "torch.nn.TransformerDecoder",
    "num_layers": 4
  },
  "delta_time_encoder": {
    "name": "fim.models.blocks.positional_encodings.SineTimeEncoding",
    "out_features": 256
  },
  "evaluation_mark_encoder": {
    "name": "torch.nn.Linear"
  },
  "hidden_act": {
    "name": "torch.nn.GELU"
  },
  "hidden_dim": 256,
  "loss_weights": {
    "alpha": 0.0,
    "mu": 0.0,
    "nll": 1.0,
    "relative_spike": 0.0,
    "smape": 0.0
  },
  "mark_encoder": {
    "name": "torch.nn.Linear",
    "out_features": 256
  },
  "mark_fusion_attention": null,
  "max_num_marks": 22,
  "mu_decoder": {
    "hidden_act": {
      "name": "torch.nn.GELU"
    },
    "hidden_layers": [
      256,
      256
    ],
    "name": "fim.models.blocks.base.MLP"
  },
  "nll": {
    "method": "monte_carlo",
    "num_integration_points": 200
  },
  "normalize_by_max_time": false,
  "normalize_times": true,
  "thinning": null,
  "time_encoder": {
    "name": "fim.models.blocks.positional_encodings.SineTimeEncoding",
    "out_features": 256
  },
  "torch_dtype": "float32",
  "transformers_version": "4.46.0",
  "model_type": "fimhawkes"
}
|
|