# Copyright 2026 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers.configuration_utils import PretrainedConfig


class DeepseekV32Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DeepseekV32Model`]. Configuration objects
    inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from
    [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 154880):
            Vocabulary size of the DeepseekV32 model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`DeepseekV32Model`].
        hidden_size (`int`, *optional*, defaults to 6144):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 12288):
            Dimension of the MLP representations.
        moe_intermediate_size (`int`, *optional*, defaults to 2048):
            Dimension of the MoE representations.
        num_hidden_layers (`int`, *optional*, defaults to 78):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 64):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 64):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details, check out
            [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        n_shared_experts (`int`, *optional*, defaults to 1):
            Number of shared experts.
        n_routed_experts (`int`, *optional*, defaults to 256):
            Number of routed experts.
        routed_scaling_factor (`float`, *optional*, defaults to 2.5):
            Scaling factor for routed experts.
        kv_lora_rank (`int`, *optional*, defaults to 512):
            Rank of the LoRA matrices for key and value projections.
        q_lora_rank (`int`, *optional*, defaults to 2048):
            Rank of the LoRA matrices for query projections.
        qk_rope_head_dim (`int`, *optional*, defaults to 64):
            Dimension of the query/key heads that use rotary position embeddings.
        v_head_dim (`int`, *optional*, defaults to 256):
            Dimension of the value heads.
        qk_nope_head_dim (`int`, *optional*, defaults to 192):
            Dimension of the query/key heads that don't use rotary position embeddings.
        n_group (`int`, *optional*, defaults to 1):
            Number of groups for routed experts.
        topk_group (`int`, *optional*, defaults to 1):
            Number of selected groups for each token (for each token, the selected experts are restricted to these
            `topk_group` groups).
        num_experts_per_tok (`int`, *optional*, defaults to 8):
            Number of selected experts, None means dense model.
        norm_topk_prob (`bool`, *optional*, defaults to `True`):
            Whether to normalize the weights of the routed experts.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 202752):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
            is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_interleave (`bool`, *optional*, defaults to `True`):
            Whether to interleave the rotary position embeddings.
        first_k_dense_replace (`int`, *optional*, defaults to 3):
            Number of dense layers in shallow layers (embed->dense->dense->...->dense->moe->moe...->lm_head).
                                                             \--k dense layers--/
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        index_topk (`int`, *optional*, defaults to 2048):
            Number of top tokens selected by the indexer for retrieval/attention in each step.
        index_n_heads (`int`, *optional*, defaults to 4):
            Number of attention heads used by the indexer.
        index_head_dim (`int`, *optional*, defaults to 8):
            Dimension of each indexer head.
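
    Example (usage sketch; it assumes [`DeepseekV32Model`] is exported by the installed `transformers` version):

    ```python
    >>> from transformers import DeepseekV32Config, DeepseekV32Model

    >>> # Initializing a DeepseekV32 style configuration
    >>> configuration = DeepseekV32Config()

    >>> # Initializing a model from the configuration (randomly initialized weights)
    >>> model = DeepseekV32Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```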
""" model_type = "deepseek_v32" keys_to_ignore_at_inference = ["past_key_values"] base_model_tp_plan = { # TODO: only replicate attention layers when > first_k_dense_replace "layers.*.mlp.experts.*.gate_proj": "local_colwise", "layers.*.mlp.experts.*.up_proj": "local_colwise", "layers.*.mlp.experts.*.down_proj": "local_rowwise", "layers.*.mlp.experts.*": "local", # each expert is wrapped in a module list "layers.*.mlp.shared_experts.gate_proj": "local_colwise", "layers.*.mlp.shared_experts.up_proj": "local_colwise", "layers.*.mlp.shared_experts.down_proj": "local_rowwise", "layers.*.mlp.shared_experts": "local", "layers.*.mlp.gate_proj": "local_colwise", "layers.*.mlp.up_proj": "local_colwise", "layers.*.mlp.down_proj": "local_rowwise", "layers.*.mlp": "gather", # This is the only moment where results are gathered } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size: int | None = 154880, hidden_size: int | None = 6144, intermediate_size: int | None = 12288, moe_intermediate_size: int | None = 2048, num_hidden_layers: int | None = 78, num_attention_heads: int | None = 64, num_key_value_heads: int | None = 64, n_shared_experts: int | None = 1, n_routed_experts: int | None = 256, routed_scaling_factor: float | None = 2.5, kv_lora_rank: int | None = 512, q_lora_rank: int | None = 2048, qk_rope_head_dim: int | None = 64, v_head_dim: int | None = 256, qk_nope_head_dim: int | None = 192, n_group: int | None = 1, topk_group: int | None = 1, num_experts_per_tok: int | None = 8, norm_topk_prob: bool | None = True, hidden_act: str | None = "silu", max_position_embeddings: int | None = 202752, initializer_range: float | None = 0.02, rms_norm_eps: int | None = 1e-5, use_cache: bool | None = True, pad_token_id: int | None = None, bos_token_id: int | None = 0, eos_token_id: int | None = 1, tie_word_embeddings: bool | None = False, rope_scaling: dict = None, rope_theta: float =10000.0, rope_interleave: bool | None = True, first_k_dense_replace: int = 3, attention_bias: bool | None = False, attention_dropout: float | None = 0.0, index_topk: int | None = 2048, index_n_heads: int = 4, index_head_dim: int = 8, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.first_k_dense_replace = first_k_dense_replace self.moe_intermediate_size = moe_intermediate_size self.num_attention_heads = num_attention_heads self.n_shared_experts = n_shared_experts self.n_routed_experts = n_routed_experts self.routed_scaling_factor = routed_scaling_factor self.kv_lora_rank = kv_lora_rank self.q_lora_rank = q_lora_rank self.qk_rope_head_dim = qk_rope_head_dim self.v_head_dim = v_head_dim self.qk_nope_head_dim = qk_nope_head_dim self.qk_head_dim = qk_nope_head_dim + qk_rope_head_dim self.head_dim = qk_rope_head_dim self.n_group = n_group self.topk_group = topk_group self.num_experts_per_tok = num_experts_per_tok self.norm_topk_prob = norm_topk_prob self.rope_interleave = rope_interleave self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.index_topk = index_topk self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.rope_scaling = 
rope_scaling self.rope_theta = rope_theta self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.tie_word_embeddings = tie_word_embeddings self.index_n_heads = index_n_heads self.index_head_dim = index_head_dim super().__init__(**kwargs) __all__ = ["DeepseekV32Config"]
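
# Illustrative note (comment only, not part of the module's behavior): the derived attributes follow directly from
# the assignments in `__init__`, e.g. for a reduced test-size config:
#
#   config = DeepseekV32Config(num_hidden_layers=2, qk_nope_head_dim=32, qk_rope_head_dim=16)
#   config.qk_head_dim  # 48, i.e. qk_nope_head_dim + qk_rope_head_dim
#   config.head_dim     # 16, i.e. qk_rope_head_dim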