{
"alpha_decoder": {
"hidden_act": {
"name": "torch.nn.GELU"
},
"hidden_layers": [
256,
256
],
"name": "fim.models.blocks.base.MLP"
},
"architectures": [
"FIMHawkes"
],
"auto_map": {
"AutoConfig": "configuration_hawkes.FIMHawkesConfig",
"AutoModel": "modeling_hawkes.FIMHawkes"
},
"beta_decoder": {
"hidden_act": {
"name": "torch.nn.GELU"
},
"hidden_layers": [
256,
256
],
"name": "fim.models.blocks.base.MLP"
},
"context_summary_encoder": {
"encoder_layer": {
"batch_first": true,
"dropout": 0.0,
"name": "torch.nn.TransformerEncoderLayer",
"nhead": 4
},
"name": "torch.nn.TransformerEncoder",
"num_layers": 2
},
"context_summary_pooling": {
"attention": {
"nhead": 4
},
"name": "fim.models.blocks.neural_operators.AttentionOperator",
"num_res_layers": 1,
"paths_block_attention": false
},
"context_ts_encoder": {
"encoder_layer": {
"batch_first": true,
"dropout": 0.0,
"name": "torch.nn.TransformerEncoderLayer",
"nhead": 4
},
"name": "torch.nn.TransformerEncoder",
"num_layers": 4
},
"decoder_ts": {
"decoder_layer": {
"batch_first": true,
"dropout": 0.0,
"name": "torch.nn.TransformerDecoderLayer",
"nhead": 4
},
"name": "torch.nn.TransformerDecoder",
"num_layers": 4
},
"delta_time_encoder": {
"name": "fim.models.blocks.positional_encodings.SineTimeEncoding",
"out_features": 256
},
"evaluation_mark_encoder": {
"name": "torch.nn.Linear"
},
"hidden_act": {
"name": "torch.nn.GELU"
},
"hidden_dim": 256,
"loss_weights": {
"alpha": 0.0,
"mu": 0.0,
"nll": 1.0,
"relative_spike": 0.0,
"smape": 0.0
},
"mark_encoder": {
"name": "torch.nn.Linear",
"out_features": 256
},
"mark_fusion_attention": null,
"max_num_marks": 22,
"mu_decoder": {
"hidden_act": {
"name": "torch.nn.GELU"
},
"hidden_layers": [
256,
256
],
"name": "fim.models.blocks.base.MLP"
},
"nll": {
"method": "monte_carlo",
"num_integration_points": 200
},
"normalize_by_max_time": false,
"normalize_times": true,
"thinning": null,
"time_encoder": {
"name": "fim.models.blocks.positional_encodings.SineTimeEncoding",
"out_features": 256
},
"torch_dtype": "float32",
"transformers_version": "4.46.0",
"model_type": "fimhawkes"
}