{
  "_name_or_path": "normalcomputing/extended-mind-mpt-7b",
  "architectures": [
    "ExtendedMptForCausalLM"
  ],
  "attn_config": {
    "alibi": true,
    "alibi_bias_max": 8,
    "attn_impl": "torch",
    "attn_pdrop": 0,
    "attn_type": "multihead_attention",
    "attn_uses_sequence_id": false,
    "clip_qkv": null,
    "mask_by_sim": true,
    "memory_type": "manual",
    "prefix_lm": false,
    "qk_ln": false,
    "sim_threshold": 0.25,
    "softmax_scale": null,
    "topk": 10,
    "use_active_externalism": true
  },
  "auto_map": {
    "AutoConfig": "configuration.ExtendedMptConfig",
    "AutoModelForCausalLM": "modeling.ExtendedMptForCausalLM"
  },
  "d_model": 4096,
  "emb_pdrop": 0,
  "embedding_fraction": 1.0,
  "expansion_ratio": 4,
  "init_config": {
    "emb_init_std": null,
    "emb_init_uniform_lim": null,
    "fan_mode": "fan_in",
    "init_div_is_residual": true,
    "init_gain": 0,
    "init_nonlinearity": "relu",
    "init_std": 0.02,
    "name": "kaiming_normal_",
    "verbose": 0
  },
  "init_device": "cpu",
  "learned_pos_emb": true,
  "logit_scale": null,
  "max_seq_len": 2048,
  "memory_device": "cpu",
  "model_type": "extended-mpt",
  "n_heads": 32,
  "n_layers": 32,
  "no_bias": true,
  "norm_type": "low_precision_layernorm",
  "resid_pdrop": 0,
  "tokenizer_name": "EleutherAI/gpt-neox-20b",
  "torch_dtype": "float32",
  "transformers_version": "4.33.0",
  "use_active_externalism_by_layer": [
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true,
    true
  ],
  "use_cache": true,
  "verbose": 0,
  "vocab_size": 50432
}
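Because the `auto_map` entries above route `AutoConfig` and `AutoModelForCausalLM` to custom classes (`configuration.ExtendedMptConfig`, `modeling.ExtendedMptForCausalLM`) shipped with the checkpoint, loading it goes through the standard Hugging Face remote-code path. Below is a minimal loading sketch under that assumption; generation-time arguments for attaching external memories are not shown and would follow the checkpoint's own documentation.

```python
# Minimal loading sketch for this config. Assumption: the checkpoint follows
# the standard Hugging Face remote-code flow implied by auto_map above.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "normalcomputing/extended-mind-mpt-7b"

# auto_map points AutoConfig / AutoModelForCausalLM at custom classes bundled
# with the checkpoint, so trust_remote_code=True is required.
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)

# tokenizer_name in the config indicates the checkpoint reuses the
# EleutherAI/gpt-neox-20b tokenizer (matching vocab_size 50432).
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
```

Note that `use_active_externalism_by_layer` lists one flag per layer (32 entries, matching `n_layers`), while `topk` and `sim_threshold` in `attn_config` govern how many external memory tokens each query attends to and the similarity cutoff when `mask_by_sim` is enabled.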
|
|