Commit 1694c12 (verified) by paultltc
Parent: 3b5c057

Replace adapter repo with merged full checkpoint

Files changed (5):
  1. BUILD_INFO.json +8 -0
  2. config.json +106 -0
  3. model.safetensors +3 -0
  4. tokenizer.json +2 -14
  5. tokenizer_config.json +0 -7
BUILD_INFO.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "description": "Merged full-model checkpoint built from ModernVBERT/modernvbert-embed and ModernVBERT/bimodernvbert.",
+   "base_repo": "ModernVBERT/modernvbert-embed",
+   "adapter_repo": "ModernVBERT/bimodernvbert",
+   "scale": 1.0,
+   "merged_lora_targets": 88,
+   "output_tensor_count": 343
+ }
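
For context, BUILD_INFO.json describes a LoRA merge: the adapter from ModernVBERT/bimodernvbert folded into the ModernVBERT/modernvbert-embed base at scale 1.0, touching 88 target matrices and producing 343 output tensors. A minimal sketch of how such a checkpoint could be rebuilt, assuming a standard PEFT workflow (the actual build script is not part of this commit):

```python
# Hypothetical reconstruction of the merge described in BUILD_INFO.json.
# Repo IDs come from BUILD_INFO.json; the PEFT workflow and output path
# are assumptions, not the commit's actual build script.
from peft import PeftModel
from transformers import AutoModel

base = AutoModel.from_pretrained(
    "ModernVBERT/modernvbert-embed", trust_remote_code=True
)
model = PeftModel.from_pretrained(base, "ModernVBERT/bimodernvbert")

# merge_and_unload() folds each LoRA delta (scaling * B @ A) into its
# target weight and returns a plain model with no adapter modules left.
merged = model.merge_and_unload()
merged.save_pretrained("bimodernvbert-merged")  # writes model.safetensors
```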
config.json ADDED
@@ -0,0 +1,106 @@
+ {
+   "image_token_id": 50407,
+   "initializer_range": 0.02,
+   "model_type": "modernvbert",
+   "pixel_shuffle_factor": 4,
+   "text_config": {
+     "_name_or_path": "ettin-encoder-150m",
+     "architectures": [
+       "ModernBertForMaskedLM"
+     ],
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "causal_mask": false,
+     "classifier_activation": "gelu",
+     "classifier_bias": false,
+     "classifier_dropout": 0.0,
+     "classifier_pooling": "mean",
+     "cls_token_id": 50281,
+     "decoder_bias": true,
+     "deterministic_flash_attn": false,
+     "dtype": "float32",
+     "embedding_dropout": 0.0,
+     "global_attn_every_n_layers": 3,
+     "global_rope_theta": 160000.0,
+     "gradient_checkpointing": false,
+     "hidden_activation": "gelu",
+     "hidden_size": 768,
+     "initializer_cutoff_factor": 2.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 1152,
+     "is_causal": false,
+     "layer_norm_eps": 1e-05,
+     "layer_types": [
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention"
+     ],
+     "local_attention": 128,
+     "local_rope_theta": 160000.0,
+     "max_position_embeddings": 7999,
+     "mlp_bias": false,
+     "mlp_dropout": 0.0,
+     "model_type": "modernbert",
+     "norm_bias": false,
+     "norm_eps": 1e-05,
+     "num_attention_heads": 12,
+     "num_hidden_layers": 22,
+     "position_embedding_type": "sans_pos",
+     "repad_logits_with_grad": false,
+     "rope_parameters": {
+       "full_attention": {
+         "rope_theta": 160000.0,
+         "rope_type": "default"
+       },
+       "sliding_attention": {
+         "rope_theta": 160000.0,
+         "rope_type": "default"
+       }
+     },
+     "sparse_pred_ignore_index": -100,
+     "sparse_prediction": false,
+     "vocab_size": 50408
+   },
+   "transformers_version": "5.0.0.dev0",
+   "vision_config": {
+     "attention_dropout": 0.0,
+     "hidden_act": "gelu_pytorch_tanh",
+     "hidden_size": 768,
+     "image_size": 512,
+     "intermediate_size": 3072,
+     "layer_norm_eps": 1e-06,
+     "model_type": "siglip_vision_model",
+     "num_attention_heads": 12,
+     "num_channels": 3,
+     "num_hidden_layers": 12,
+     "patch_size": 16
+   },
+   "tie_word_embeddings": false,
+   "architectures": [
+     "BiModernVBert"
+   ],
+   "freeze_config": {
+     "freeze_lm_head": true,
+     "freeze_text_layers": true,
+     "freeze_vision_layers": true
+   }
+ }
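
The config wires a 22-layer ModernBERT text encoder (hidden size 768, a full-attention layer every third layer with sliding attention in between) to a SigLIP vision tower (512 px images, 16 px patches) under the custom "modernvbert" model type. A minimal loading sketch, assuming this checkpoint is hosted at a repo ID like the placeholder below; if the installed transformers release does not yet ship the modernvbert architecture, remote code would be needed:

```python
# Loading sketch; the repo ID is a placeholder and trust_remote_code is an
# assumption for the custom "modernvbert" / "BiModernVBert" architecture.
from transformers import AutoModel, AutoProcessor

repo_id = "ModernVBERT/bimodernvbert"  # placeholder
processor = AutoProcessor.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)
```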
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:622336c3bbf33c068d8cf47168d0df2783b2be978c052452515435cd2eda7088
+ size 1008053496
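
What is committed here is the Git LFS pointer, not the weights: the actual model.safetensors (~1.0 GB, holding the 343 merged tensors counted above) lives in LFS storage and is identified by the SHA-256 in the pointer. A quick way to check a downloaded copy against it (the local filename is assumed):

```python
# Verify a downloaded model.safetensors against the LFS pointer's oid.
import hashlib

EXPECTED = "622336c3bbf33c068d8cf47168d0df2783b2be978c052452515435cd2eda7088"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "checksum mismatch; re-download the file"
```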
tokenizer.json CHANGED
@@ -1,19 +1,7 @@
  {
    "version": "1.0",
-   "truncation": {
-     "direction": "Right",
-     "max_length": 4096,
-     "strategy": "LongestFirst",
-     "stride": 0
-   },
-   "padding": {
-     "strategy": "BatchLongest",
-     "direction": "Left",
-     "pad_to_multiple_of": null,
-     "pad_id": 50283,
-     "pad_type_id": 0,
-     "pad_token": "[PAD]"
-   },
+   "truncation": null,
+   "padding": null,
    "added_tokens": [
      {
        "id": 0,
tokenizer_config.json CHANGED
@@ -1259,7 +1259,6 @@
    "extra_special_tokens": {},
    "legacy": false,
    "mask_token": "[MASK]",
-   "max_length": 4096,
    "model_input_names": [
      "input_ids",
      "attention_mask",
@@ -1267,15 +1266,9 @@
      "pixel_attention_mask"
    ],
    "model_max_length": 8192,
-   "pad_to_multiple_of": null,
    "pad_token": "[PAD]",
-   "pad_token_type_id": 0,
-   "padding_side": "left",
    "processor_class": "BiModernVBertProcessor",
    "sep_token": "[SEP]",
-   "stride": 0,
    "tokenizer_class": "PreTrainedTokenizerFast",
-   "truncation_side": "right",
-   "truncation_strategy": "longest_first",
    "unk_token": "[UNK]"
  }
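
Both tokenizer files drop the baked-in truncation and padding defaults (right-truncation at 4096 tokens, left-padding to the longest item in the batch), so after this commit those behaviors are opt-in per call. A sketch of how a caller could reproduce the old defaults explicitly, assuming the standard transformers tokenizer API (this commit does not prescribe these values):

```python
# Reproduce the removed serialized defaults at call time (an assumption
# about intended usage; values mirror the deleted tokenizer.json fields).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("ModernVBERT/bimodernvbert")  # placeholder repo ID
tok.padding_side = "left"  # the old "direction": "Left"

batch = tok(
    ["a short query", "a much longer query about the same topic"],
    truncation=True,      # old "LongestFirst" strategy, cutting from the right
    max_length=4096,      # the old "max_length": 4096
    padding="longest",    # the old "BatchLongest" strategy
    return_tensors="pt",
)
```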