| import math |
| from dataclasses import dataclass |
| from typing import Callable, Optional, Union |
|
|
| import numpy as np |
| import torch |
| from torch import nn |
| from torch.nn import functional as F |
|
|
| from transformers.activations import ACT2FN |
| from transformers.cache_utils import Cache, DynamicCache |
| from transformers.generation import GenerationMixin |
| from transformers.integrations import use_kernel_forward_from_hub |
| from transformers.masking_utils import create_causal_mask |
| from transformers.modeling_flash_attention_utils import FlashAttentionKwargs |
| from transformers.modeling_layers import GradientCheckpointingLayer |
| from transformers.modeling_outputs import ( |
| BaseModelOutput, |
| BaseModelOutputWithPast, |
| MoeCausalLMOutputWithPast, |
| ) |
| from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update |
| from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel |
| from transformers.processing_utils import Unpack |
| from transformers.utils import auto_docstring, can_return_tuple |
| from transformers.utils.deprecation import deprecate_kwarg |
| from transformers.utils.generic import TransformersKwargs, check_model_inputs |
|
|
| from .configuration_qwen3_asr import ( |
| Qwen3ASRAudioEncoderConfig, |
| Qwen3ASRConfig, |
| Qwen3ASRThinkerConfig, |
| ) |
|
|
|
|
| @use_kernel_forward_from_hub("RMSNorm") |
| class Qwen3ASRTextRMSNorm(nn.Module): |
| def __init__(self, hidden_size, eps: float = 1e-6) -> None: |
| """ |
| Qwen3ASRTextRMSNorm is equivalent to T5LayerNorm |
| """ |
| super().__init__() |
| self.weight = nn.Parameter(torch.ones(hidden_size)) |
| self.variance_epsilon = eps |
|
|
| def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
| input_dtype = hidden_states.dtype |
| hidden_states = hidden_states.to(torch.float32) |
| variance = hidden_states.pow(2).mean(-1, keepdim=True) |
| hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) |
| return self.weight * hidden_states.to(input_dtype) |
|
|
| def extra_repr(self): |
| return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" |
|
|
|
|
| def rotate_half(x): |
| """Rotates half the hidden dims of the input.""" |
| x1 = x[..., : x.shape[-1] // 2] |
| x2 = x[..., x.shape[-1] // 2 :] |
| return torch.cat((-x2, x1), dim=-1) |
|
|
|
|
| def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: |
| """ |
| This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, |
| num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) |
| """ |
| batch, num_key_value_heads, slen, head_dim = hidden_states.shape |
| if n_rep == 1: |
| return hidden_states |
| hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) |
| return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) |
|
|
|
|
| def eager_attention_forward( |
| module: nn.Module, |
| query: torch.Tensor, |
| key: torch.Tensor, |
| value: torch.Tensor, |
| attention_mask: Optional[torch.Tensor], |
| scaling: float, |
| dropout: float = 0.0, |
| **kwargs: Unpack[TransformersKwargs], |
| ): |
| key_states = repeat_kv(key, module.num_key_value_groups) |
| value_states = repeat_kv(value, module.num_key_value_groups) |
|
|
| attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling |
| if attention_mask is not None: |
| causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] |
| attn_weights = attn_weights + causal_mask |
|
|
| attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) |
| attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) |
| attn_output = torch.matmul(attn_weights, value_states) |
| attn_output = attn_output.transpose(1, 2).contiguous() |
|
|
| return attn_output, attn_weights |
|
|
|
|
| def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): |
| """Applies Rotary Position Embedding to the query and key tensors. |
| |
| Args: |
| q (`torch.Tensor`): The query tensor. |
| k (`torch.Tensor`): The key tensor. |
| cos (`torch.Tensor`): The cosine part of the rotary embedding. |
| sin (`torch.Tensor`): The sine part of the rotary embedding. |
| position_ids (`torch.Tensor`, *optional*): |
| Deprecated and unused. |
| unsqueeze_dim (`int`, *optional*, defaults to 1): |
| The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and |
| sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note |
| that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and |
| k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes |
| cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have |
| the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. |
| Returns: |
| `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. |
| """ |
| cos = cos.unsqueeze(unsqueeze_dim) |
| sin = sin.unsqueeze(unsqueeze_dim) |
| q_embed = (q * cos) + (rotate_half(q) * sin) |
| k_embed = (k * cos) + (rotate_half(k) * sin) |
| return q_embed, k_embed |
|
|
|
|
| class Qwen3ASRTextAttention(nn.Module): |
| """Multi-headed attention from 'Attention Is All You Need' paper""" |
|
|
| def __init__(self, config: Qwen3ASRConfig, layer_idx: int): |
| super().__init__() |
| self.config = config |
| self.layer_idx = layer_idx |
| self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) |
| self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads |
| self.scaling = self.head_dim**-0.5 |
| self.attention_dropout = config.attention_dropout |
| self.is_causal = True |
|
|
| self.q_proj = nn.Linear( |
| config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias |
| ) |
| self.k_proj = nn.Linear( |
| config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias |
| ) |
| self.v_proj = nn.Linear( |
| config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias |
| ) |
| self.o_proj = nn.Linear( |
| config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias |
| ) |
| self.q_norm = Qwen3ASRTextRMSNorm( |
| self.head_dim, eps=config.rms_norm_eps |
| ) |
| self.k_norm = Qwen3ASRTextRMSNorm( |
| self.head_dim, eps=config.rms_norm_eps |
| ) |
|
|
| @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") |
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| position_embeddings: tuple[torch.Tensor, torch.Tensor], |
| attention_mask: Optional[torch.Tensor], |
| past_key_values: Optional[Cache] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| **kwargs: Unpack[FlashAttentionKwargs], |
| ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: |
| input_shape = hidden_states.shape[:-1] |
| hidden_shape = (*input_shape, -1, self.head_dim) |
|
|
| query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2) |
| key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2) |
| value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) |
|
|
| cos, sin = position_embeddings |
| query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) |
|
|
| if past_key_values is not None: |
| |
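| # sin and cos are specific to RoPE models; cache_position needed for the static cache |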
| cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} |
| key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) |
|
|
| attention_interface: Callable = eager_attention_forward |
| if self.config._attn_implementation != "eager": |
| attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] |
|
|
| attn_output, attn_weights = attention_interface( |
| self, |
| query_states, |
| key_states, |
| value_states, |
| attention_mask, |
| dropout=0.0 if not self.training else self.attention_dropout, |
| scaling=self.scaling, |
| **kwargs, |
| ) |
|
|
| attn_output = attn_output.reshape(*input_shape, -1).contiguous() |
| attn_output = self.o_proj(attn_output) |
| return attn_output, attn_weights |
|
|
|
|
| class Qwen3ASRTextMLP(nn.Module): |
| def __init__(self, config): |
| super().__init__() |
| self.config = config |
| self.hidden_size = config.hidden_size |
| self.intermediate_size = config.intermediate_size |
| self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) |
| self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) |
| self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) |
| self.act_fn = ACT2FN[config.hidden_act] |
|
|
| def forward(self, x): |
| down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) |
| return down_proj |
|
|
|
|
| class Qwen3ASRThinkerTextDecoderLayer(GradientCheckpointingLayer): |
| def __init__(self, config: Qwen3ASRConfig, layer_idx: int): |
| super().__init__() |
| self.hidden_size = config.hidden_size |
|
|
| self.self_attn = Qwen3ASRTextAttention(config=config, layer_idx=layer_idx) |
|
|
| self.mlp = Qwen3ASRTextMLP(config) |
| self.input_layernorm = Qwen3ASRTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) |
| self.post_attention_layernorm = Qwen3ASRTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) |
|
|
| @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") |
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| position_embeddings: tuple[torch.Tensor, torch.Tensor], |
| attention_mask: Optional[torch.Tensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Cache] = None, |
| use_cache: Optional[bool] = False, |
| cache_position: Optional[torch.LongTensor] = None, |
| **kwargs: Unpack[TransformersKwargs], |
| ) -> torch.Tensor: |
| residual = hidden_states |
| hidden_states = self.input_layernorm(hidden_states) |
| |
| hidden_states, _ = self.self_attn( |
| hidden_states=hidden_states, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| past_key_values=past_key_values, |
| use_cache=use_cache, |
| cache_position=cache_position, |
| position_embeddings=position_embeddings, |
| **kwargs, |
| ) |
| hidden_states = residual + hidden_states |
|
|
| |
| residual = hidden_states |
| hidden_states = self.post_attention_layernorm(hidden_states) |
| hidden_states = self.mlp(hidden_states) |
| hidden_states = residual + hidden_states |
| return hidden_states |
|
|
|
|
| @auto_docstring |
| class Qwen3ASRPreTrainedModel(PreTrainedModel): |
| config: Qwen3ASRConfig |
| base_model_prefix = "model" |
| supports_gradient_checkpointing = True |
| _skip_keys_device_placement = "past_key_values" |
| _supports_flash_attn = True |
| _supports_sdpa = True |
|
|
| _can_compile_fullgraph = True |
| _supports_attention_backend = True |
| _can_record_outputs = { |
| "attentions": Qwen3ASRTextAttention, |
| } |
|
|
|
|
| @dataclass |
| class Qwen3ASRThinkerCausalLMOutputWithPast(MoeCausalLMOutputWithPast): |
| r""" |
| Args: |
| rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): |
| The rope index difference between sequence length and multimodal rope. |
| """ |
|
|
| rope_deltas: Optional[torch.LongTensor] = None |
|
|
|
|
| def _get_feat_extract_output_lengths(input_lengths): |
| """ |
| Computes the number of audio-encoder output frames produced after the convolutional downsampling |
| """ |
|
|
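| # Mel frames are processed in chunks of 100: each full chunk passes three stride-2 |
| # downsamplings (100 -> 50 -> 25 -> 13) and contributes 13 encoder frames, while the |
| # leftover frames (< 100) go through the same three ceil-divisions by 2. |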
| input_lengths_leave = input_lengths % 100 |
| feat_lengths = (input_lengths_leave - 1) // 2 + 1 |
| output_lengths = ((feat_lengths - 1) // 2 + 1 - 1) // 2 + 1 + (input_lengths // 100) * 13 |
| return output_lengths |
|
|
|
|
| class Qwen3ASRPreTrainedModelForConditionalGeneration(Qwen3ASRPreTrainedModel): |
| def _prepare_4d_causal_attention_mask_with_cache_position( |
| self, |
| attention_mask: torch.Tensor, |
| sequence_length: int, |
| target_length: int, |
| dtype: torch.dtype, |
| device: torch.device, |
| min_dtype: float, |
| cache_position: torch.Tensor, |
| batch_size: int, |
| ): |
| """ |
| Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape |
| `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. |
| |
| Args: |
| attention_mask (`torch.Tensor`): |
| A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. |
| sequence_length (`int`): |
| The sequence length being processed. |
| target_length (`int`): |
| The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. |
| dtype (`torch.dtype`): |
| The dtype to use for the 4D attention mask. |
| device (`torch.device`): |
| The device to place the 4D attention mask on. |
| min_dtype (`float`): |
| The minimum value representable with the dtype `dtype`. |
| cache_position (`torch.Tensor`): |
| Indices depicting the position of the input sequence tokens in the sequence. |
| batch_size (`torch.Tensor`): |
| Batch size. |
| """ |
| if attention_mask is not None and attention_mask.dim() == 4: |
| |
| causal_mask = attention_mask |
| else: |
| causal_mask = torch.full( |
| (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device |
| ) |
| if sequence_length != 1: |
| causal_mask = torch.triu(causal_mask, diagonal=1) |
| causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) |
| causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) |
| if attention_mask is not None: |
| causal_mask = causal_mask.clone() |
| mask_length = attention_mask.shape[-1] |
| padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] |
| padding_mask = padding_mask == 0 |
| causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( |
| padding_mask, min_dtype |
| ) |
|
|
| return causal_mask |
|
|
|
|
| def get_chunked_index( |
| self, token_indices: torch.Tensor, tokens_per_chunk: int, remove_index: int |
| ) -> list[tuple[int, int]]: |
| """ |
| Splits token index list into chunks based on token value ranges. |
| |
| Given a list of token indices, returns a list of (start, end) index tuples representing |
| slices of the list where the token values fall within successive ranges of `tokens_per_chunk`. |
| |
| For example, if `tokens_per_chunk` is 1000, the function will create chunks such that: |
| - the first chunk contains token values < 1000, |
| - the second chunk contains values >= 1000 and < 2000, and so on. |
| |
| Parameters: |
| token_indices (`torch.Tensor` of shape `(seq_len, )`): A monotonically increasing list of |
| token index values. |
| tokens_per_chunk (`int`): Number of tokens per chunk (used as the chunk size threshold). |
| remove_index (`int`): An index id to subtract from `token_indices` before chunking. |
| |
| Returns: |
| `list[tuple[int, int]]`: A list of tuples, each representing the start (inclusive) |
| and end (exclusive) indices of a chunk in `token_indices`. |
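| |
| Example (illustrative): |
| token_indices = torch.tensor([10, 500, 999, 1001, 1500, 2100]) |
| with tokens_per_chunk=1000 and remove_index=0 -> [(0, 3), (3, 5), (5, 6)] |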
| """ |
|
|
| def _iter(): |
| i, start_idx = 0, 0 |
| current_chunk = 1 |
| while i < len(token_indices): |
| if token_indices[i] - remove_index >= current_chunk * tokens_per_chunk: |
| yield (start_idx, i) |
| start_idx = i |
| current_chunk += 1 |
| i += 1 |
| yield (start_idx, len(token_indices)) |
|
|
| return list(_iter()) |
|
|
| def get_rope_index( |
| self, |
| attention_mask: Optional[torch.Tensor] = None, |
| ) -> tuple[torch.Tensor, torch.Tensor]: |
| """ |
| Calculate the RoPE index used by the LLM. |
| |
| Explanation: |
| Each embedding sequence contains text only, so positions are simply the cumulative sum of the attention mask. |
| |
| Args: |
| attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| |
| Returns: |
| position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`) |
| mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`) |
| """ |
| position_ids = attention_mask.long().cumsum(-1) - 1 |
| position_ids.masked_fill_(attention_mask == 0, 1) |
| position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device) |
| max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0] |
| mrope_position_deltas = max_position_ids + 1 - torch.sum(attention_mask, dim=-1, keepdim=True) |
|
|
| return position_ids, mrope_position_deltas |
|
|
|
|
| class Qwen3ASRAudioAttention(nn.Module): |
| """Multi-headed attention from 'Attention Is All You Need' paper""" |
|
|
| def __init__(self, config): |
| super().__init__() |
| self.embed_dim = config.d_model |
| self.num_heads = config.encoder_attention_heads |
| self.dropout = config.attention_dropout |
| self.head_dim = self.embed_dim // self.num_heads |
| self.num_key_value_groups = 1 |
| self.config = config |
|
|
| if (self.head_dim * self.num_heads) != self.embed_dim: |
| raise ValueError( |
| f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" |
| f" and `num_heads`: {self.num_heads})." |
| ) |
| self.scaling = self.head_dim**-0.5 |
| self.attention_dropout = 0.0 |
| self.is_decoder = False |
| self.is_causal = False |
| self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True) |
| self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True) |
| self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True) |
| self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True) |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| cu_seqlens: Optional[torch.Tensor] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| **kwargs, |
| ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: |
| """Input shape: Batch x Time x Channel""" |
|
|
| seq_length, _ = hidden_states.size() |
|
|
| query_states = self.q_proj(hidden_states).reshape(seq_length, self.num_heads, -1) |
| key_states = self.k_proj(hidden_states).reshape(seq_length, self.num_heads, -1) |
| value_states = self.v_proj(hidden_states).reshape(seq_length, self.num_heads, -1) |
|
|
| query_states = query_states.transpose(0, 1).unsqueeze(0) |
| key_states = key_states.transpose(0, 1).unsqueeze(0) |
| value_states = value_states.transpose(0, 1).unsqueeze(0) |
| max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max() |
|
|
| attention_interface: Callable = eager_attention_forward |
| if self.config._attn_implementation != "eager": |
| attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] |
|
|
| attn_output, _ = attention_interface( |
| self, |
| query_states, |
| key_states, |
| value_states, |
| attention_mask=attention_mask, |
| dropout=0.0 if not self.training else self.attention_dropout, |
| scaling=self.scaling, |
| cu_seq_lens_q=cu_seqlens, |
| cu_seq_lens_k=cu_seqlens, |
| max_length_q=max_seqlen, |
| max_length_k=max_seqlen, |
| is_causal=False, |
| **kwargs, |
| ) |
|
|
| attn_output = attn_output.reshape(seq_length, -1).contiguous() |
| attn_output = self.out_proj(attn_output) |
|
|
| return attn_output |
|
|
|
|
| class Qwen3ASRAudioEncoderLayer(GradientCheckpointingLayer): |
| def __init__(self, config: Qwen3ASRAudioEncoderConfig): |
| super().__init__() |
| self.embed_dim = config.d_model |
| self.self_attn = Qwen3ASRAudioAttention(config) |
| self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) |
| self.dropout = config.dropout |
| self.activation_fn = ACT2FN[config.activation_function] |
| self.activation_dropout = config.activation_dropout |
| self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) |
| self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) |
| self.final_layer_norm = nn.LayerNorm(self.embed_dim) |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| cu_seqlens: torch.Tensor, |
| attention_mask: Optional[torch.Tensor] = None, |
| **kwargs, |
| ) -> torch.Tensor: |
| """ |
| Args: |
| hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` |
| attention_mask (`torch.FloatTensor`): attention mask of size |
| `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. |
| layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size |
| `(encoder_attention_heads,)`. |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
| returned tensors for more detail. |
| """ |
| residual = hidden_states |
| hidden_states = self.self_attn_layer_norm(hidden_states) |
| hidden_states = self.self_attn( |
| hidden_states=hidden_states, |
| cu_seqlens=cu_seqlens, |
| attention_mask=attention_mask, |
| **kwargs, |
| ) |
| hidden_states = residual + hidden_states |
| residual = hidden_states |
| hidden_states = self.final_layer_norm(hidden_states) |
| hidden_states = self.fc1(hidden_states) |
| hidden_states = self.activation_fn(hidden_states) |
| hidden_states = self.fc2(hidden_states) |
| hidden_states = residual + hidden_states |
|
|
| if hidden_states.dtype == torch.float16: |
| clamp_value = torch.finfo(hidden_states.dtype).max - 1000 |
| hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) |
|
|
| outputs = (hidden_states,) |
|
|
| return outputs |
|
|
|
|
| class SinusoidsPositionEmbedding(nn.Module): |
| def __init__(self, length, channels, max_timescale=10000): |
| super().__init__() |
| if channels % 2 != 0: |
| raise ValueError("SinusoidsPositionEmbedding needs even channels input") |
| log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1) |
| inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2).float()) |
| scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :] |
| self.register_buffer( |
| "positional_embedding", |
| torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1), |
| persistent=False, |
| ) |
|
|
| def forward(self, seqlen: int): |
| return self.positional_embedding[:seqlen, :] |
|
|
|
|
| @auto_docstring( |
| custom_intro=""" |
| Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a |
| [`Qwen3ASRAudioEncoderLayer`]. |
| """ |
| ) |
| class Qwen3ASRAudioEncoder(Qwen3ASRPreTrainedModel): |
| config: Qwen3ASRAudioEncoderConfig |
| main_input_name = "input_features" |
| _no_split_modules = ["Qwen3ASRAudioEncoderLayer"] |
| _supports_sdpa = True |
|
|
| def __init__(self, config: Qwen3ASRAudioEncoderConfig): |
| super().__init__(config) |
| self.dropout = config.dropout |
|
|
| embed_dim = config.d_model |
| self.num_mel_bins = config.num_mel_bins |
| self.max_source_positions = config.max_source_positions |
| self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 |
| self.n_window = config.n_window |
| self.positional_embedding = SinusoidsPositionEmbedding(self.max_source_positions, embed_dim) |
| self.layers = nn.ModuleList([Qwen3ASRAudioEncoderLayer(config) for _ in range(config.encoder_layers)]) |
| self.ln_post = nn.LayerNorm(config.d_model) |
| self.gradient_checkpointing = False |
| self.conv2d1 = nn.Conv2d(1, config.downsample_hidden_size, 3, 2, padding=1) |
| self.conv2d2 = nn.Conv2d(config.downsample_hidden_size, config.downsample_hidden_size, 3, 2, padding=1) |
| self.conv2d3 = nn.Conv2d(config.downsample_hidden_size, config.downsample_hidden_size, 3, 2, padding=1) |
| self.conv_out = nn.Linear( |
| config.downsample_hidden_size * ((((config.num_mel_bins + 1) // 2 + 1) // 2 + 1) // 2), |
| config.d_model, |
| bias=False, |
| ) |
| self.proj1 = nn.Linear(config.d_model, config.d_model) |
| self.act = ACT2FN[config.activation_function] |
| self.proj2 = nn.Linear(config.d_model, config.output_dim) |
| self.n_window_infer = self.config.n_window_infer |
| self.conv_chunksize = self.config.conv_chunksize |
| |
| self.post_init() |
|
|
| def _freeze_parameters(self): |
| for param in self.parameters(): |
| param.requires_grad = False |
| self._requires_grad = False |
|
|
| def get_input_embeddings(self) -> nn.Module: |
| return self.conv2d1 |
|
|
| def set_input_embeddings(self, value: nn.Module): |
| self.conv2d1 = value |
|
|
| def _prepare_attention_mask(self, inputs_tensor: torch.Tensor, cu_seqlens: torch.Tensor) -> torch.Tensor: |
| |
| |
| |
| |
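| # Flash attention consumes `cu_seqlens` directly, so no dense mask is needed there. |
| # For the eager/SDPA paths, build a block-diagonal additive mask so that tokens only |
| # attend within their own audio chunk. |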
| if self.config._attn_implementation == "flash_attention_2": |
| return None |
|
|
| seq_length = inputs_tensor.shape[0] |
| attention_mask = torch.full( |
| [1, 1, seq_length, seq_length], |
| torch.finfo(inputs_tensor.dtype).min, |
| device=inputs_tensor.device, |
| dtype=inputs_tensor.dtype, |
| ) |
| for i in range(1, len(cu_seqlens)): |
| attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = 0 |
| return attention_mask |
|
|
| @auto_docstring |
| def forward( |
| self, |
| input_features, |
| feature_lens=None, |
| aftercnn_lens=None, |
| ): |
| r""" |
| feature_lens (`torch.LongTensor` of shape `(batch_size,)`): |
| mel length |
| aftercnn_lens (`torch.LongTensor` of shape `(batch_size,)`): |
| mel length after cnn |
| """ |
| aftercnn_lens = _get_feat_extract_output_lengths(feature_lens) |
| chunk_num = torch.ceil(feature_lens / (self.n_window * 2)).long() |
|
|
| chunk_lengths = torch.tensor( |
| [self.n_window * 2] * chunk_num.sum(), |
| dtype=torch.long, |
| device=feature_lens.device, |
| ) |
| tail_chunk_index = F.pad(chunk_num, (1, 0), value=-1).cumsum(0)[1:] |
| chunk_lengths[tail_chunk_index] = feature_lens % (self.n_window * 2) |
| chunk_lengths[chunk_lengths == 0] = self.n_window * 2 |
|
|
| chunk_list = input_features.T.split(chunk_lengths.tolist(), dim=0) |
| padded_feature = nn.utils.rnn.pad_sequence(chunk_list, batch_first=True).transpose(1, 2) |
| feature_lens_after_cnn = _get_feat_extract_output_lengths(chunk_lengths) |
| padded_mask_after_cnn = nn.utils.rnn.pad_sequence( |
| [torch.ones(length, dtype=torch.bool, device=padded_feature.device) for length in feature_lens_after_cnn], |
| batch_first=True, |
| ) |
| padded_feature = padded_feature.unsqueeze(1) |
| |
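| # Run the 2D conv front-end in slices of `conv_chunksize` chunks to keep peak memory bounded. |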
| padded_embeds = [] |
| for chunk in padded_feature.split(self.conv_chunksize, dim=0): |
| padded_embed = F.gelu(self.conv2d1(chunk)) |
| padded_embed = F.gelu(self.conv2d2(padded_embed)) |
| padded_embed = F.gelu(self.conv2d3(padded_embed)) |
| padded_embeds.append(padded_embed) |
| padded_embed = torch.cat(padded_embeds, dim=0) |
| b, c, f, t = padded_embed.size() |
| padded_embed = self.conv_out(padded_embed.permute(0, 3, 1, 2).contiguous().view(b, t, c * f)) |
|
|
| positional_embedding = ( |
| self.positional_embedding.positional_embedding[: padded_embed.shape[1], :] |
| .unsqueeze(0) |
| .to(padded_embed.dtype) |
| ) |
| padded_embed = padded_embed + positional_embedding |
| hidden_states = padded_embed[padded_mask_after_cnn] |
| cu_chunk_lens = [0] |
| window_aftercnn = padded_mask_after_cnn.shape[-1] * (self.n_window_infer // (self.n_window * 2)) |
| for cnn_len in aftercnn_lens: |
| cu_chunk_lens += [window_aftercnn] * (cnn_len // window_aftercnn) |
| remainder = cnn_len % window_aftercnn |
| if remainder != 0: |
| cu_chunk_lens += [remainder] |
| cu_seqlens = torch.tensor(cu_chunk_lens, device=aftercnn_lens.device).cumsum(-1, dtype=torch.int32) |
|
|
| for encoder_layer in self.layers: |
| layer_outputs = encoder_layer( |
| hidden_states, |
| cu_seqlens, |
| ) |
|
|
| hidden_states = layer_outputs[0] |
|
|
| hidden_states = self.ln_post(hidden_states) |
| hidden_states = self.proj1(hidden_states) |
| hidden_states = self.act(hidden_states) |
| hidden_states = self.proj2(hidden_states) |
| return BaseModelOutput(last_hidden_state=hidden_states) |
|
|
| def padded_and_mask_function(self, tensor_list, tensor_len, padding_value=0, padding_side="right"): |
| """ |
| Pads a sequence of tensors to their maximum length on indicated `padding_side`. |
| Then prepares a mask so that pad tokens are not attended to. |
| """ |
| max_len = tensor_len.max() |
| dim = tensor_list[0].shape[0] |
| padded_tensor = torch.full( |
| size=(len(tensor_list), dim, max_len), |
| fill_value=padding_value, |
| dtype=self.dtype, |
| device=tensor_list[0].device, |
| ) |
|
|
| batch_mask = torch.zeros( |
| (len(tensor_len), max_len), |
| dtype=torch.long, |
| device=padded_tensor.device, |
| ) |
| for i, length in enumerate(tensor_len): |
| batch_mask[i, :length] = 1 |
| padded_tensor[i, :, :length] = tensor_list[i] |
|
|
| feature_lens_after_cnn = (tensor_len - 1) // 2 + 1 |
| max_len_after_cnn = feature_lens_after_cnn.max() |
| batch_mask_after_cnn = torch.zeros( |
| (len(tensor_len), max_len_after_cnn), |
| dtype=torch.long, |
| device=padded_tensor.device, |
| ) |
| for i, length in enumerate(feature_lens_after_cnn): |
| batch_mask_after_cnn[i, :length] = 1 |
| return ( |
| padded_tensor, |
| batch_mask.unsqueeze(1), |
| batch_mask_after_cnn.bool(), |
| ) |
|
|
|
|
| class Qwen3ASRThinkerTextRotaryEmbedding(nn.Module): |
| inv_freq: torch.Tensor |
|
|
| def __init__(self, config: Qwen3ASRConfig, device=None): |
| super().__init__() |
| if hasattr(config, "rope_scaling") and config.rope_scaling is not None: |
| self.rope_type = config.rope_scaling.get("rope_type", "default") |
| else: |
| self.rope_type = "default" |
| self.max_seq_len_cached = config.max_position_embeddings |
| self.original_max_seq_len = config.max_position_embeddings |
|
|
| self.config = config |
| self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] |
|
|
| inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) |
| self.register_buffer("inv_freq", inv_freq, persistent=False) |
| self.original_inv_freq = self.inv_freq |
|
|
| self.mrope_section = (config.rope_scaling or {}).get("mrope_section", [24, 20, 20]) |
|
|
| def apply_interleaved_mrope(self, freqs, mrope_section): |
| """Apply interleaved MRoPE to 3D rotary embeddings. |
| Reorganizes frequency layout from chunked [TTT...HHH...WWW] to |
| interleaved [THTHWHTHW...TT], preserving frequency continuity. |
| args: |
| x: (3, bs, seq_len, head_dim // 2) |
| mrope_section: (3,) |
| returns: |
| x_t: (bs, seq_len, head_dim // 2) |
| """ |
| freqs_t = freqs[0] |
| for dim, offset in enumerate((1, 2), start=1): |
| length = mrope_section[dim] * 3 |
| idx = slice(offset, length, 3) |
| freqs_t[..., idx] = freqs[dim, ..., idx] |
| return freqs_t |
|
|
| @torch.no_grad() |
| @dynamic_rope_update |
| def forward(self, x, position_ids): |
| |
| |
| if position_ids.ndim == 2: |
| position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1) |
| inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1) |
| position_ids_expanded = position_ids[:, :, None, :].float() |
|
|
| device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" |
| with torch.autocast(device_type=device_type, enabled=False): |
| freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3) |
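| # Interleave the three axes' frequencies channel-wise; e.g. with mrope_section=[2, 1, 1] |
| # and 4 frequency channels the layout becomes [T, H, W, T]. |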
| freqs = self.apply_interleaved_mrope(freqs, self.mrope_section) |
| emb = torch.cat((freqs, freqs), dim=-1) |
| cos = emb.cos() * self.attention_scaling |
| sin = emb.sin() * self.attention_scaling |
|
|
| return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) |
|
|
|
|
| class Qwen3ASRThinkerTextMLP(nn.Module): |
| def __init__(self, config, intermediate_size=None): |
| super().__init__() |
| self.config = config |
| self.hidden_size = config.hidden_size |
| self.intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size |
| self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) |
| self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) |
| self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) |
| self.act_fn = ACT2FN[config.hidden_act] |
|
|
| def forward(self, x): |
| down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) |
| return down_proj |
|
|
|
|
| @use_kernel_forward_from_hub("RMSNorm") |
| class Qwen3ASRThinkerTextRMSNorm(nn.Module): |
| def __init__(self, hidden_size, eps=1e-6): |
| """ |
| Qwen3ASRThinkerTextRMSNorm is equivalent to T5LayerNorm |
| """ |
| super().__init__() |
| self.weight = nn.Parameter(torch.ones(hidden_size)) |
| self.variance_epsilon = eps |
|
|
| def forward(self, hidden_states): |
| input_dtype = hidden_states.dtype |
| hidden_states = hidden_states.to(torch.float32) |
| variance = hidden_states.pow(2).mean(-1, keepdim=True) |
| hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) |
| return self.weight * hidden_states.to(input_dtype) |
|
|
| def extra_repr(self): |
| return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" |
|
|
|
|
| class Qwen3ASRThinkerTextAttention(nn.Module): |
| """Multi-headed attention from 'Attention Is All You Need' paper""" |
|
|
| def __init__(self, config, layer_idx): |
| super().__init__() |
| self.config = config |
| self.layer_idx = layer_idx |
| self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) |
| self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads |
| self.scaling = self.head_dim**-0.5 |
| self.attention_dropout = config.attention_dropout |
| self.is_causal = True |
|
|
| self.q_proj = nn.Linear( |
| config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias |
| ) |
| self.k_proj = nn.Linear( |
| config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias |
| ) |
| self.v_proj = nn.Linear( |
| config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias |
| ) |
| self.o_proj = nn.Linear( |
| config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias |
| ) |
| self.q_norm = Qwen3ASRThinkerTextRMSNorm( |
| self.head_dim, eps=config.rms_norm_eps |
| ) |
| self.k_norm = Qwen3ASRThinkerTextRMSNorm( |
| self.head_dim, eps=config.rms_norm_eps |
| ) |
| self.sliding_window = None |
|
|
| @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") |
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| position_embeddings: tuple[torch.Tensor, torch.Tensor], |
| attention_mask: Optional[torch.Tensor], |
| past_key_values: Optional[Cache] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| **kwargs: Unpack[FlashAttentionKwargs], |
| ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: |
| input_shape = hidden_states.shape[:-1] |
| hidden_shape = (*input_shape, -1, self.head_dim) |
|
|
| query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2) |
| key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2) |
| value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) |
|
|
| cos, sin = position_embeddings |
| query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) |
|
|
| if past_key_values is not None: |
| |
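| # sin and cos are specific to RoPE models; cache_position needed for the static cache |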
| cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} |
| key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) |
|
|
| attention_interface: Callable = eager_attention_forward |
| if self.config._attn_implementation != "eager": |
| attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] |
|
|
| attn_output, attn_weights = attention_interface( |
| self, |
| query_states, |
| key_states, |
| value_states, |
| attention_mask, |
| dropout=0.0 if not self.training else self.attention_dropout, |
| scaling=self.scaling, |
| sliding_window=self.sliding_window, |
| **kwargs, |
| ) |
|
|
| attn_output = attn_output.reshape(*input_shape, -1).contiguous() |
| attn_output = self.o_proj(attn_output) |
| return attn_output, attn_weights |
|
|
|
|
| @auto_docstring( |
| custom_intro=( |
| "Text part of Qwen3ASRThinker, " |
| ) |
| ) |
| class Qwen3ASRThinkerTextModel(Qwen3ASRPreTrainedModel): |
| config: Qwen3ASRConfig |
| _no_split_modules = ["Qwen3ASRThinkerTextDecoderLayer"] |
| config_class = Qwen3ASRConfig |
| _can_record_outputs = { |
| "hidden_states": Qwen3ASRThinkerTextDecoderLayer, |
| "attentions": Qwen3ASRThinkerTextAttention, |
| } |
|
|
| def __init__(self, config: Qwen3ASRConfig): |
| super().__init__(config) |
| self.padding_idx = config.pad_token_id |
| self.vocab_size = config.vocab_size |
|
|
| self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) |
| self.layers = nn.ModuleList( |
| [Qwen3ASRThinkerTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] |
| ) |
| self.norm = Qwen3ASRTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) |
| self.rotary_emb = Qwen3ASRThinkerTextRotaryEmbedding(config) |
| self.gradient_checkpointing = False |
|
|
| |
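| # Initialize weights and apply final processing |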
| self.post_init() |
|
|
| @check_model_inputs() |
| @auto_docstring |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Cache] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| use_cache: Optional[bool] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| **kwargs: Unpack[FlashAttentionKwargs], |
| ) -> Union[tuple, BaseModelOutputWithPast]: |
| if (input_ids is None) ^ (inputs_embeds is not None): |
| raise ValueError("You must specify exactly one of input_ids or inputs_embeds") |
|
|
| |
| if use_cache and past_key_values is None and not torch.jit.is_tracing(): |
| past_key_values = DynamicCache(config=self.config) |
|
|
| if inputs_embeds is None: |
| inputs_embeds = self.embed_tokens(input_ids) |
|
|
| if cache_position is None: |
| past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 |
| cache_position = torch.arange( |
| past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device |
| ) |
|
|
| |
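| # M-RoPE expects position ids of shape (3, batch, seq_len); for text-only inputs all |
| # three axes carry the same positions. |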
| if position_ids is None: |
| position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1) |
| elif position_ids.ndim == 2: |
| position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1) |
|
|
| if position_ids.ndim == 3 and position_ids.shape[0] == 4: |
| text_position_ids = position_ids[0] |
| position_ids = position_ids[1:] |
| else: |
| text_position_ids = position_ids[0] |
|
|
| attention_mask = create_causal_mask( |
| config=self.config, |
| input_embeds=inputs_embeds, |
| attention_mask=attention_mask, |
| cache_position=cache_position, |
| past_key_values=past_key_values, |
| position_ids=text_position_ids, |
| ) |
|
|
| hidden_states = inputs_embeds |
|
|
| |
| position_embeddings = self.rotary_emb(hidden_states, position_ids) |
|
|
| |
| for layer_idx, decoder_layer in enumerate(self.layers): |
| layer_outputs = decoder_layer( |
| hidden_states, |
| attention_mask=attention_mask, |
| position_ids=text_position_ids, |
| past_key_values=past_key_values, |
| cache_position=cache_position, |
| position_embeddings=position_embeddings, |
| **kwargs, |
| ) |
| hidden_states = layer_outputs |
|
|
| hidden_states = self.norm(hidden_states) |
|
|
| return BaseModelOutputWithPast( |
| last_hidden_state=hidden_states, |
| past_key_values=past_key_values, |
| ) |
|
|
|
|
| @auto_docstring( |
| custom_intro=""" |
| The Qwen3ASRThinker model, which consists of an audio encoder and a language model. |
| """ |
| ) |
| class Qwen3ASRThinkerForConditionalGeneration(Qwen3ASRPreTrainedModelForConditionalGeneration, GenerationMixin): |
| config: Qwen3ASRThinkerConfig |
| base_model_prefix = "thinker" |
| _tied_weights_keys = ["model.embed_tokens.weight", "lm_head.weight"] |
| _no_split_modules = [ |
| "Qwen3ASRAudioEncoderLayer", |
| "Qwen3ASRThinkerTextDecoderLayer", |
| ] |
| _can_record_outputs = { |
| "hidden_states": Qwen3ASRThinkerTextDecoderLayer, |
| "attentions": Qwen3ASRThinkerTextAttention, |
| } |
|
|
| def __init__(self, config): |
| super().__init__(config) |
| self.audio_tower = Qwen3ASRAudioEncoder._from_config(config.audio_config) |
| self.vocab_size = config.text_config.vocab_size |
| self.model = Qwen3ASRThinkerTextModel._from_config(config.text_config) |
| if "forced_aligner" in config.model_type: |
| self.lm_head = nn.Linear(config.text_config.hidden_size, config.classify_num, bias=False) |
| else: |
| self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) |
| self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1 |
| self.rope_deltas = None |
| self.post_init() |
|
|
| def get_input_embeddings(self): |
| return self.model.get_input_embeddings() |
|
|
| def set_input_embeddings(self, value): |
| self.model.set_input_embeddings(value) |
|
|
| def get_audio_features( |
| self, |
| input_features: torch.FloatTensor, |
| feature_attention_mask: Optional[torch.LongTensor] = None, |
| audio_feature_lengths: Optional[torch.LongTensor] = None, |
| ): |
| """ |
| Encodes audios into continuous embeddings that can be forwarded to the language model. |
| |
| Args: |
| input_features (`torch.FloatTensor`): |
| The tensors corresponding to the input audios. |
| feature_attention_mask (`torch.LongTensor`, *optional*): |
| Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`: |
| audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*): |
| The length of feature shape of each audio in LLM. |
| """ |
| # Prefer explicitly provided lengths; otherwise derive them from the attention mask. |
| if audio_feature_lengths is None: |
| audio_feature_lengths = torch.sum(feature_attention_mask, dim=1) |
| feature_lens = audio_feature_lengths |
| |
| |
| audio_features = [] |
| for input_feature, feature_len in zip(input_features, feature_lens): |
| audio_output = self.audio_tower( |
| input_feature[:, :feature_len], |
| feature_lens=feature_len.unsqueeze(0), |
| ) |
| audio_feature = audio_output.last_hidden_state |
| audio_features.append(audio_feature) |
| audio_features = torch.cat(audio_features, dim=0) |
|
|
| return audio_features |
|
|
| def get_placeholder_mask( |
| self, |
| input_ids: torch.LongTensor, |
| inputs_embeds: torch.FloatTensor, |
| ): |
| """ |
| Obtains the multimodal (audio) placeholder mask from `input_ids` or `inputs_embeds`, expanded to the shape of |
| `inputs_embeds` so that it can be used with `masked_scatter`. |
| """ |
| if input_ids is None: |
| special_audio_mask = ( |
| inputs_embeds |
| == self.get_input_embeddings()( |
| torch.tensor(self.config.audio_token_id, dtype=torch.long, device=inputs_embeds.device) |
| ) |
| ).all(-1) |
| else: |
| special_audio_mask = input_ids == self.config.audio_token_id |
|
|
| special_audio_mask = special_audio_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) |
| return special_audio_mask |
|
|
| @can_return_tuple |
| @auto_docstring |
| def forward( |
| self, |
| input_ids=None, |
| input_features=None, |
| attention_mask=None, |
| feature_attention_mask=None, |
| audio_feature_lengths=None, |
| position_ids=None, |
| past_key_values=None, |
| inputs_embeds=None, |
| rope_deltas=None, |
| labels=None, |
| use_cache=None, |
| cache_position=None, |
| **kwargs, |
| ) -> Union[tuple, Qwen3ASRThinkerCausalLMOutputWithPast]: |
| r""" |
| feature_attention_mask (`torch.Tensor` of shape `(batch_size, feature_sequence_length)`, *optional*): |
| Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`: |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*): |
| The length of feature shape of each audio in LLM. |
| rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): |
| The rope index difference between sequence length and multimodal rope. |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., |
| config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored |
| (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. |
| """ |
|
|
| if inputs_embeds is None: |
| |
| inputs_embeds = self.get_input_embeddings()(input_ids) |
|
|
| |
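| # Encode the audio and scatter it into the audio placeholder positions of the text embeddings. |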
| if input_features is not None: |
| audio_features = self.get_audio_features( |
| input_features, |
| feature_attention_mask=feature_attention_mask, |
| audio_feature_lengths=audio_feature_lengths, |
| ) |
| audio_features = audio_features.to(inputs_embeds.device, inputs_embeds.dtype) |
| audio_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds) |
| inputs_embeds = inputs_embeds.masked_scatter(audio_mask, audio_features) |
|
|
| if feature_attention_mask is not None: |
| audio_feature_lengths = torch.sum(feature_attention_mask, dim=1) |
| else: |
| audio_feature_lengths = None |
|
|
| if attention_mask is not None and position_ids is None: |
| if ( |
| cache_position is None |
| or (cache_position is not None and cache_position[0] == 0) |
| or self.rope_deltas is None |
| ): |
| delta0 = (1 - attention_mask).sum(dim=-1).unsqueeze(1) |
| position_ids, rope_deltas = self.get_rope_index( |
| attention_mask, |
| ) |
| rope_deltas = rope_deltas - delta0 |
| self.rope_deltas = rope_deltas |
| else: |
| batch_size, seq_length = input_ids.shape |
| delta = cache_position[0] + self.rope_deltas if cache_position is not None else 0 |
| position_ids = torch.arange(seq_length, device=input_ids.device) |
| position_ids = position_ids.view(1, -1).expand(batch_size, -1) |
| position_ids = position_ids.add(delta) |
| position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) |
|
|
| outputs = self.model( |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| past_key_values=past_key_values, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| cache_position=cache_position, |
| **kwargs, |
| ) |
|
|
| hidden_states = outputs[0] |
| logits = self.lm_head(hidden_states) |
|
|
| loss = None |
| if labels is not None: |
| loss = self.loss_function( |
| logits=logits, labels=labels, vocab_size=self.config.get_text_config().vocab_size |
| ) |
|
|
| return Qwen3ASRThinkerCausalLMOutputWithPast( |
| loss=loss, |
| logits=logits, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| past_key_values=outputs.past_key_values, |
| rope_deltas=self.rope_deltas, |
| ) |
|
|
| def prepare_inputs_for_generation( |
| self, |
| input_ids, |
| past_key_values=None, |
| attention_mask=None, |
| inputs_embeds=None, |
| cache_position=None, |
| position_ids=None, |
| use_cache=True, |
| input_features=None, |
| feature_attention_mask=None, |
| **kwargs, |
| ): |
| model_inputs = super().prepare_inputs_for_generation( |
| input_ids, |
| past_key_values=past_key_values, |
| attention_mask=attention_mask, |
| inputs_embeds=inputs_embeds, |
| cache_position=cache_position, |
| position_ids=position_ids, |
| use_cache=use_cache, |
| input_features=input_features, |
| feature_attention_mask=feature_attention_mask, |
| **kwargs, |
| ) |
|
|
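| # Position ids (and rope deltas) are recomputed inside `forward`, so drop any defaults prepared above. |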
| model_inputs["position_ids"] = None |
|
|
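| # Audio features only need to be forwarded on the prefill step; later decoding steps rely on the KV cache. |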
| if cache_position[0] != 0: |
| model_inputs["input_features"] = None |
|
|
| return model_inputs |
|
|
|
|
| @auto_docstring |
| class Qwen3ASRThinkerTextPreTrainedModel(PreTrainedModel): |
| config: Qwen3ASRConfig |
| base_model_prefix = "model" |
| supports_gradient_checkpointing = True |
| _no_split_modules = ["Qwen3ASRThinkerTextDecoderLayer"] |
| _skip_keys_device_placement = ["past_key_values"] |
| _supports_flash_attn = True |
| _supports_sdpa = True |
| _supports_flex_attn = True |
| _can_compile_fullgraph = False |
| _supports_attention_backend = True |
| _can_record_outputs = { |
| "hidden_states": Qwen3ASRThinkerTextDecoderLayer, |
| "attentions": Qwen3ASRThinkerTextAttention, |
| } |
| config_class = Qwen3ASRConfig |
|
|
|
|
| class Qwen3ASRForConditionalGeneration(Qwen3ASRPreTrainedModel, GenerationMixin): |
| config_class = Qwen3ASRConfig |
|
|
| def __init__(self, config: Qwen3ASRConfig): |
| super().__init__(config) |
| self.config = config |
|
|
| self.thinker = Qwen3ASRThinkerForConditionalGeneration._from_config(config.thinker_config) |
| self.post_init() |
| |
| def get_support_languages(self): |
| return self.config.support_languages |
|
|
| @torch.no_grad() |
| def generate( |
| self, |
| input_ids: Optional[torch.Tensor] = None, |
| max_new_tokens: int = 4096, |
| eos_token_id: Union[int, list[int]] = [151645, 151643], |
| **kwargs, |
| ): |
| shared_kwargs = {} |
| thinker_kwargs = { |
| "max_new_tokens": max_new_tokens, |
| "eos_token_id": eos_token_id, |
| } |
|
|
| for key, value in kwargs.items(): |
| |
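| # Route audio/text tensors directly to the thinker; remaining kwargs are treated as |
| # shared and merged into the thinker kwargs below when not already set. |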
| if key == "feature_attention_mask": |
| thinker_kwargs[key] = value |
| elif key in ("input_features", "attention_mask"): |
| thinker_kwargs[key] = value |
| |
| else: |
| shared_kwargs[key] = value |
|
|
| |
| for key, value in shared_kwargs.items(): |
| if key not in thinker_kwargs: |
| thinker_kwargs[key] = value |
|
|
| thinker_result = self.thinker.generate(input_ids=input_ids, return_dict_in_generate=True, **thinker_kwargs) |
|
|
| return thinker_result |
|
|
| def transcribe( |
| self, |
| audio: "np.ndarray", |
| processor, |
| language: str = "Danish", |
| target_sr: int = 16_000, |
| step_seconds: float = 15.0, |
| rollback_tokens: int = 8, |
| max_new_tokens: int = 2048, |
| ) -> str: |
| """Transcribe audio using accumulated-audio continuation decoding. |
| |
| Args: |
| audio: 1-D float32 waveform at *target_sr* Hz (use |
| ``processor.load_audio`` to obtain this). |
| processor: The ``Qwen3ASRProcessor`` used to tokenize prompts and |
| extract audio features. |
| language: Language name for the prompt tag. Defaults to |
| ``"Danish"``. |
| target_sr: Sample rate of *audio* in Hz. Must match what |
| ``processor.load_audio`` produced. Defaults to 16 000. |
| step_seconds: Seconds of new audio fed per continuation step. |
| Defaults to 15.0. |
| rollback_tokens: Number of tokens to roll back when building the |
| text prefix for continuation. Defaults to 8. |
| max_new_tokens: Generation budget per step. Defaults to 2048. |
| |
| Returns: |
| The full transcription string. |
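| |
| Example (illustrative; the audio path is a placeholder): |
| >>> waveform = processor.load_audio("speech.wav") |
| >>> text = model.transcribe(waveform, processor, language="Danish") |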
| """ |
| device = next(self.parameters()).device |
| dtype = next(self.parameters()).dtype |
|
|
| base_prompt = processor.build_prompt(language=language) |
| step_samples = max(1, int(round(step_seconds * target_sr))) |
| raw_decoded = "" |
| audio_accum = np.zeros((0,), dtype=np.float32) |
|
|
| for chunk_idx, start in enumerate(range(0, audio.shape[0], step_samples)): |
| chunk = audio[start : start + step_samples] |
| if chunk.size == 0: |
| continue |
|
|
| audio_accum = ( |
| chunk if audio_accum.size == 0 |
| else np.concatenate([audio_accum, chunk]) |
| ) |
|
|
| prefix = "" |
| if chunk_idx >= 1 and raw_decoded: |
| cur_ids = processor.tokenizer.encode(raw_decoded) |
| rb = rollback_tokens |
| while True: |
| end_idx = max(0, len(cur_ids) - rb) |
| prefix = ( |
| processor.tokenizer.decode(cur_ids[:end_idx]) |
| if end_idx > 0 else "" |
| ) |
| if "\ufffd" not in prefix or end_idx == 0: |
| break |
| rb += 1 |
|
|
| inputs = processor( |
| text=[base_prompt + prefix], |
| audio=[audio_accum], |
| sampling_rate=target_sr, |
| return_tensors="pt", |
| padding=True, |
| ) |
| inputs = {k: v.to(device) for k, v in inputs.items()} |
| if inputs["input_features"].is_floating_point(): |
| inputs["input_features"] = inputs["input_features"].to(dtype=dtype) |
|
|
| with torch.inference_mode(): |
| generated = self.generate(**inputs, max_new_tokens=max_new_tokens) |
|
|
| decoded = processor.batch_decode( |
| generated.sequences[:, inputs["input_ids"].shape[1]:], |
| skip_special_tokens=True, |
| clean_up_tokenization_spaces=False, |
| )[0] |
| raw_decoded = prefix + decoded |
|
|
| return raw_decoded.strip() |
|
|
|
|
| __all__ = [ |
| "Qwen3ASRForConditionalGeneration", |
| "Qwen3ASRThinkerTextModel", |
| "Qwen3ASRThinkerForConditionalGeneration", |
| "Qwen3ASRPreTrainedModel", |
| "Qwen3ASRPreTrainedModelForConditionalGeneration", |
| "Qwen3ASRThinkerTextPreTrainedModel", |
| ] |
|
|