import math

import torch
import torch.nn as nn
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin

from .attention import flash_attention


def sinusoidal_embedding_1d(dim, position):
    # dim must be even so the cosine and sine halves have equal width
    assert dim % 2 == 0
    half = dim // 2
    position = position.type(torch.float64)

    # outer product of positions and inverse frequencies, then concat cos/sin
    sinusoid = torch.outer(
        position, torch.pow(10000, -torch.arange(half).to(position).div(half))
    )
    x = torch.cat([torch.cos(sinusoid), torch.sin(sinusoid)], dim=1)
    return x
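

# Illustrative sketch (not part of the original file): a minimal shape check for
# sinusoidal_embedding_1d; the width 8 and positions 0..4 are hypothetical.
def _demo_sinusoidal_embedding():
    emb = sinusoidal_embedding_1d(8, torch.arange(5))
    assert emb.shape == (5, 8)  # first half cosines, second half sines
    assert emb.dtype == torch.float64
    return emb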


@torch.amp.autocast("cuda", enabled=False)
def rope_params(max_seq_len, dim, theta=10000):
    assert dim % 2 == 0
    freqs = torch.outer(
        torch.arange(max_seq_len),
        1.0 / torch.pow(theta, torch.arange(0, dim, 2).to(torch.float64).div(dim)),
    )
    freqs = torch.polar(torch.ones_like(freqs), freqs)
    return freqs
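

# Illustrative sketch (not part of the original file): rope_params returns one
# unit-magnitude complex rotation per (position, frequency pair), i.e. a table
# of shape [max_seq_len, dim // 2]. The sizes 16 and 8 are hypothetical.
def _demo_rope_params():
    freqs = rope_params(16, 8)
    assert freqs.shape == (16, 4)
    assert freqs.dtype == torch.complex128
    assert torch.allclose(freqs.abs(), torch.ones(16, 4, dtype=torch.float64))
    return freqs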


@torch.amp.autocast("cuda", enabled=False)
def rope_apply(x, grid_sizes, freqs):
    n, c = x.size(2), x.size(3) // 2

    # split the per-head frequencies into temporal / height / width parts
    freqs = freqs.split([c - 2 * (c // 3), c // 3, c // 3], dim=1)

    # loop over samples; each may use a different (F, H, W) grid
    output = []
    for i, (f, h, w) in enumerate(grid_sizes.tolist()):
        seq_len = f * h * w

        # view the first seq_len tokens as complex pairs
        x_i = torch.view_as_complex(
            x[i, :seq_len].to(torch.float64).reshape(seq_len, n, -1, 2)
        )
        freqs_i = torch.cat(
            [
                freqs[0][:f].view(f, 1, 1, -1).expand(f, h, w, -1),
                freqs[1][:h].view(1, h, 1, -1).expand(f, h, w, -1),
                freqs[2][:w].view(1, 1, w, -1).expand(f, h, w, -1),
            ],
            dim=-1,
        ).reshape(seq_len, 1, -1)

        # rotate, then keep any padded tail unchanged
        x_i = torch.view_as_real(x_i * freqs_i).flatten(2)
        x_i = torch.cat([x_i, x[i, seq_len:]])

        output.append(x_i)
    return torch.stack(output).float()
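

# Illustrative sketch (not part of the original file): apply the factorized 3D
# RoPE to a dummy [B, L, num_heads, head_dim] tensor. The head_dim of 24, the
# grid (F, H, W) = (2, 3, 3), and the padded length of 20 are all hypothetical.
def _demo_rope_apply():
    head_dim = 24
    d6 = head_dim // 6
    freqs = torch.cat(
        [
            rope_params(32, head_dim - 4 * d6),  # temporal frequencies
            rope_params(32, 2 * d6),  # height frequencies
            rope_params(32, 2 * d6),  # width frequencies
        ],
        dim=1,
    )
    x = torch.randn(1, 20, 4, head_dim)  # 20 tokens, of which 2*3*3=18 are valid
    grid_sizes = torch.tensor([[2, 3, 3]])
    out = rope_apply(x, grid_sizes, freqs)
    assert out.shape == x.shape  # the padded tail past 18 tokens is left unrotated
    return out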


class WanRMSNorm(nn.Module):
    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.dim = dim
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        r"""
        Args:
            x(Tensor): Shape [B, L, C]
        """
        return self._norm(x.float()).type_as(x) * self.weight

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)
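

# Illustrative sketch (not part of the original file): WanRMSNorm computes
# y = x / sqrt(mean(x^2) + eps) * weight over the last dimension, so with the
# default unit weight it matches the manual formula below. Sizes are hypothetical.
def _demo_rms_norm():
    norm = WanRMSNorm(16)
    x = torch.randn(2, 3, 16)
    manual = x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + norm.eps)
    assert torch.allclose(norm(x), manual, atol=1e-6)
    return norm(x)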


class WanLayerNorm(nn.LayerNorm):
    def __init__(self, dim, eps=1e-6, elementwise_affine=False):
        super().__init__(dim, elementwise_affine=elementwise_affine, eps=eps)

    def forward(self, x):
        r"""
        Args:
            x(Tensor): Shape [B, L, C]
        """
        return super().forward(x.float()).type_as(x)


class WanSelfAttention(nn.Module):
    def __init__(
        self, dim, num_heads, window_size=(-1, -1), qk_norm=True, eps=1e-6, causal=False
    ):
        assert dim % num_heads == 0
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.window_size = window_size
        self.qk_norm = qk_norm
        self.eps = eps
        self.causal = causal

        # layers
        self.q = nn.Linear(dim, dim)
        self.k = nn.Linear(dim, dim)
        self.v = nn.Linear(dim, dim)
        self.o = nn.Linear(dim, dim)
        self.norm_q = WanRMSNorm(dim, eps=eps) if qk_norm else nn.Identity()
        self.norm_k = WanRMSNorm(dim, eps=eps) if qk_norm else nn.Identity()

    def forward(self, x, seq_lens, grid_sizes, freqs):
        r"""
        Args:
            x(Tensor): Shape [B, L, C]
            seq_lens(Tensor): Shape [B]
            grid_sizes(Tensor): Shape [B, 3], the second dimension contains (F, H, W)
            freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2]
        """
        b, s, n, d = *x.shape[:2], self.num_heads, self.head_dim

        # query, key, value projections
        def qkv_fn(x):
            q = self.norm_q(self.q(x)).view(b, s, n, d)
            k = self.norm_k(self.k(x)).view(b, s, n, d)
            v = self.v(x).view(b, s, n, d)
            return q, k, v

        q, k, v = qkv_fn(x)

        # attention with rotary position embedding applied to queries and keys
        x = flash_attention(
            q=rope_apply(q, grid_sizes, freqs),
            k=rope_apply(k, grid_sizes, freqs),
            v=v,
            k_lens=seq_lens,
            window_size=self.window_size,
            causal=self.causal,
        )

        # merge heads and project out
        x = x.flatten(2)
        x = self.o(x)
        return x


class WanCrossAttention(WanSelfAttention):
    def forward(self, x, context, context_lens):
        r"""
        Args (non-stream mode):
            x(Tensor): Shape [B, L1, C]
            context(Tensor): Shape [B, L2, C]
            context_lens(Tensor): Shape [B]
        Args (stream mode):
            x(Tensor): Shape [B, L1, C]
            context(Tensor): Shape [B*L1, L2, C]
            context_lens(Tensor): Shape [B*L1]
        """
        out_sizes = x.size()
        b, n, d = context.size(0), self.num_heads, self.head_dim

        # queries from x, keys/values from the context
        q = self.norm_q(self.q(x)).view(b, -1, n, d)
        k = self.norm_k(self.k(context)).view(b, -1, n, d)
        v = self.v(context).view(b, -1, n, d)

        # attention
        x = flash_attention(q, k, v, k_lens=context_lens)

        # restore the input layout and project out
        x = x.flatten(2).view(*out_sizes)
        x = self.o(x)
        return x


class WanAttentionBlock(nn.Module):
    def __init__(
        self,
        dim,
        ffn_dim,
        num_heads,
        window_size=(-1, -1),
        qk_norm=True,
        cross_attn_norm=False,
        eps=1e-6,
        causal=False,
    ):
        super().__init__()
        self.dim = dim
        self.ffn_dim = ffn_dim
        self.num_heads = num_heads
        self.window_size = window_size
        self.qk_norm = qk_norm
        self.cross_attn_norm = cross_attn_norm
        self.eps = eps
        self.causal = causal

        # layers
        self.norm1 = WanLayerNorm(dim, eps)
        self.self_attn = WanSelfAttention(
            dim, num_heads, window_size, qk_norm, eps, causal
        )
        self.norm3 = (
            WanLayerNorm(dim, eps, elementwise_affine=True)
            if cross_attn_norm
            else nn.Identity()
        )
        self.cross_attn = WanCrossAttention(dim, num_heads, (-1, -1), qk_norm, eps)
        self.norm2 = WanLayerNorm(dim, eps)
        self.ffn = nn.Sequential(
            nn.Linear(dim, ffn_dim),
            nn.GELU(approximate="tanh"),
            nn.Linear(ffn_dim, dim),
        )

        # per-block modulation parameters (shift, scale and gate for attention and FFN)
        self.modulation = nn.Parameter(torch.randn(1, 6, dim) / dim**0.5)

    def forward(
        self,
        x,
        e,
        seq_lens,
        grid_sizes,
        freqs,
        context,
        context_lens,
    ):
        r"""
        Args:
            x(Tensor): Shape [B, L, C]
            e(Tensor): Shape [B, L1, 6, C]
            seq_lens(Tensor): Shape [B], length of each sequence in the batch
            grid_sizes(Tensor): Shape [B, 3], the second dimension contains (F, H, W)
            freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2]
            context(Tensor): Context tokens for cross-attention
            context_lens(Tensor): Length of each context sequence
        """
        assert e.dtype == torch.float32
        with torch.amp.autocast("cuda", dtype=torch.float32):
            e = (self.modulation.unsqueeze(0) + e).chunk(6, dim=2)
        assert e[0].dtype == torch.float32

        # self-attention with modulation (shift e[0], scale e[1], gate e[2])
        y = self.self_attn(
            self.norm1(x).float() * (1 + e[1].squeeze(2)) + e[0].squeeze(2),
            seq_lens,
            grid_sizes,
            freqs,
        )
        with torch.amp.autocast("cuda", dtype=torch.float32):
            x = x + y * e[2].squeeze(2)

        # cross-attention and FFN with modulation (shift e[3], scale e[4], gate e[5])
        def cross_attn_ffn(x, context, context_lens, e):
            x = x + self.cross_attn(self.norm3(x), context, context_lens)
            y = self.ffn(
                self.norm2(x).float() * (1 + e[4].squeeze(2)) + e[3].squeeze(2)
            )
            with torch.amp.autocast("cuda", dtype=torch.float32):
                x = x + y * e[5].squeeze(2)
            return x

        x = cross_attn_ffn(x, context, context_lens, e)
        return x
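

# Illustrative sketch (not part of the original file): how the per-block
# modulation parameter combines with the time embedding `e` of shape
# [B, L1, 6, C] to produce six [B, L1, C] shift/scale/gate tensors.
# The sizes below are hypothetical.
def _demo_modulation_chunks():
    b, l1, c = 1, 4, 32
    modulation = torch.randn(1, 6, c) / c**0.5  # as in WanAttentionBlock
    e = torch.randn(b, l1, 6, c)
    chunks = (modulation.unsqueeze(0) + e).chunk(6, dim=2)
    shift_msa, scale_msa, gate_msa, shift_ffn, scale_ffn, gate_ffn = [
        t.squeeze(2) for t in chunks
    ]
    assert shift_msa.shape == (b, l1, c)
    return shift_msa, scale_msa, gate_msa, shift_ffn, scale_ffn, gate_ffn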


class Head(nn.Module):
    def __init__(self, dim, out_dim, patch_size, eps=1e-6):
        super().__init__()
        self.dim = dim
        self.out_dim = out_dim
        self.patch_size = patch_size
        self.eps = eps

        # layers: project each token to out_dim * prod(patch_size) values
        out_dim = math.prod(patch_size) * out_dim
        self.norm = WanLayerNorm(dim, eps)
        self.head = nn.Linear(dim, out_dim)

        # modulation parameters (shift and scale)
        self.modulation = nn.Parameter(torch.randn(1, 2, dim) / dim**0.5)

    def forward(self, x, e):
        r"""
        Args:
            x(Tensor): Shape [B, L1, C]
            e(Tensor): Shape [B, L1, C]
        """
        assert e.dtype == torch.float32
        with torch.amp.autocast("cuda", dtype=torch.float32):
            e = (self.modulation.unsqueeze(0) + e.unsqueeze(2)).chunk(2, dim=2)
            x = self.head(self.norm(x) * (1 + e[1].squeeze(2)) + e[0].squeeze(2))
        return x


class WanModel(ModelMixin, ConfigMixin):
    r"""
    Wan diffusion backbone supporting both text-to-video and image-to-video.
    """

    ignore_for_config = [
        "patch_size",
        "cross_attn_norm",
        "qk_norm",
        "text_dim",
        "window_size",
    ]
    _no_split_modules = ["WanAttentionBlock"]

    @register_to_config
    def __init__(
        self,
        model_type="t2v",
        patch_size=(1, 2, 2),
        text_len=512,
        in_dim=16,
        dim=2048,
        ffn_dim=8192,
        freq_dim=256,
        text_dim=4096,
        out_dim=16,
        num_heads=16,
        num_layers=32,
        window_size=(-1, -1),
        qk_norm=True,
        cross_attn_norm=True,
        eps=1e-6,
        causal=False,
    ):
        r"""
        Initialize the diffusion model backbone.

        Args:
            model_type (`str`, *optional*, defaults to 't2v'):
                Model variant - 't2v' (text-to-video), 'i2v' (image-to-video), 'ti2v', or 's2v'
            patch_size (`tuple`, *optional*, defaults to (1, 2, 2)):
                3D patch dimensions for video embedding (t_patch, h_patch, w_patch)
            text_len (`int`, *optional*, defaults to 512):
                Fixed length for text embeddings
            in_dim (`int`, *optional*, defaults to 16):
                Input video channels (C_in)
            dim (`int`, *optional*, defaults to 2048):
                Hidden dimension of the transformer
            ffn_dim (`int`, *optional*, defaults to 8192):
                Intermediate dimension in feed-forward network
            freq_dim (`int`, *optional*, defaults to 256):
                Dimension for sinusoidal time embeddings
            text_dim (`int`, *optional*, defaults to 4096):
                Input dimension for text embeddings
            out_dim (`int`, *optional*, defaults to 16):
                Output video channels (C_out)
            num_heads (`int`, *optional*, defaults to 16):
                Number of attention heads
            num_layers (`int`, *optional*, defaults to 32):
                Number of transformer blocks
            window_size (`tuple`, *optional*, defaults to (-1, -1)):
                Window size for local attention (-1 indicates global attention)
            qk_norm (`bool`, *optional*, defaults to True):
                Enable query/key normalization
            cross_attn_norm (`bool`, *optional*, defaults to True):
                Enable cross-attention normalization
            eps (`float`, *optional*, defaults to 1e-6):
                Epsilon value for normalization layers
            causal (`bool`, *optional*, defaults to False):
                Enable causal self-attention in the transformer blocks
        """
        super().__init__()

        assert model_type in ["t2v", "i2v", "ti2v", "s2v"]
        self.model_type = model_type

        self.patch_size = patch_size
        self.text_len = text_len
        self.in_dim = in_dim
        self.dim = dim
        self.ffn_dim = ffn_dim
        self.freq_dim = freq_dim
        self.text_dim = text_dim
        self.out_dim = out_dim
        self.num_heads = num_heads
        self.num_layers = num_layers
        self.window_size = window_size
        self.qk_norm = qk_norm
        self.cross_attn_norm = cross_attn_norm
        self.eps = eps
        self.causal = causal

        # embeddings
        self.patch_embedding = nn.Conv3d(
            in_dim, dim, kernel_size=patch_size, stride=patch_size
        )
        self.text_embedding = nn.Sequential(
            nn.Linear(text_dim, dim), nn.GELU(approximate="tanh"), nn.Linear(dim, dim)
        )

        self.time_embedding = nn.Sequential(
            nn.Linear(freq_dim, dim), nn.SiLU(), nn.Linear(dim, dim)
        )
        self.time_projection = nn.Sequential(nn.SiLU(), nn.Linear(dim, dim * 6))

        # transformer blocks
        self.blocks = nn.ModuleList(
            [
                WanAttentionBlock(
                    dim,
                    ffn_dim,
                    num_heads,
                    window_size,
                    qk_norm,
                    cross_attn_norm,
                    eps,
                    causal,
                )
                for _ in range(num_layers)
            ]
        )

        # output head
        self.head = Head(dim, out_dim, patch_size, eps)

        # rope frequency table (temporal / height / width parts)
        assert (dim % num_heads) == 0 and (dim // num_heads) % 2 == 0
        d = dim // num_heads
        self.freqs = torch.cat(
            [
                rope_params(1024, d - 4 * (d // 6)),
                rope_params(1024, 2 * (d // 6)),
                rope_params(1024, 2 * (d // 6)),
            ],
            dim=1,
        )

        # initialize weights
        self.init_weights()

    def forward(
        self,
        x,
        t,
        context,
        seq_len,
        y=None,
    ):
        r"""
        Forward pass through the diffusion model

        Args:
            x (List[Tensor]):
                List of input video tensors, each with shape [C_in, F, H, W]
            t (Tensor):
                Diffusion timesteps tensor of shape [B]
            context (List[Tensor]):
                List of text embeddings, each with shape [L, C]
            seq_len (`int`):
                Maximum sequence length for positional encoding
            y (List[Tensor], *optional*):
                Conditional video inputs for image-to-video mode, same shape as x

        Returns:
            List[Tensor]:
                List of denoised video tensors with original input shapes [C_out, F, H / 8, W / 8]
        """
        if self.model_type == "i2v":
            assert y is not None

        # params
        device = self.patch_embedding.weight.device
        if self.freqs.device != device:
            self.freqs = self.freqs.to(device)

        if y is not None:
            x = [torch.cat([u, v], dim=0) for u, v in zip(x, y)]

        # patch embeddings, padded to a common seq_len
        x = [self.patch_embedding(u.unsqueeze(0)) for u in x]
        grid_sizes = torch.stack(
            [torch.tensor(u.shape[2:], dtype=torch.long) for u in x]
        )
        x = [u.flatten(2).transpose(1, 2) for u in x]
        seq_lens = torch.tensor([u.size(1) for u in x], dtype=torch.long)
        assert seq_lens.max() <= seq_len
        x = torch.cat(
            [
                torch.cat([u, u.new_zeros(1, seq_len - u.size(1), u.size(2))], dim=1)
                for u in x
            ]
        )

        # time embeddings
        if t.dim() == 1:
            # broadcast per-sample timesteps over the token dimension
            t = t.unsqueeze(1).expand(t.size(0), seq_len)
        with torch.amp.autocast("cuda", dtype=torch.float32):
            bt = t.size(0)
            t = t.flatten()
            e = self.time_embedding(
                sinusoidal_embedding_1d(self.freq_dim, t)
                .unflatten(0, (bt, seq_len))
                .float()
            )
            e0 = self.time_projection(e).unflatten(2, (6, self.dim))
            assert e.dtype == torch.float32 and e0.dtype == torch.float32

        # text embeddings, padded to the fixed text_len
        context_lens = torch.tensor([u.size(0) for u in context], dtype=torch.long)
        context = self.text_embedding(
            torch.stack(
                [
                    torch.cat([u, u.new_zeros(self.text_len - u.size(0), u.size(1))])
                    for u in context
                ]
            )
        )

        # arguments shared by all transformer blocks
        kwargs = dict(
            e=e0,
            seq_lens=seq_lens,
            grid_sizes=grid_sizes,
            freqs=self.freqs,
            context=context,
            context_lens=context_lens,
        )

        for block in self.blocks:
            x = block(x, **kwargs)

        # head
        x = self.head(x, e)

        # unpatchify
        x = self.unpatchify(x, grid_sizes)
        return [u.float() for u in x]

    def unpatchify(self, x, grid_sizes):
        r"""
        Reconstruct video tensors from patch embeddings.

        Args:
            x (List[Tensor]):
                List of patchified features, each with shape [L, C_out * prod(patch_size)]
            grid_sizes (Tensor):
                Original spatial-temporal grid dimensions before patching,
                shape [B, 3] (3 dimensions correspond to F_patches, H_patches, W_patches)

        Returns:
            List[Tensor]:
                Reconstructed video tensors with shape [C_out, F, H / 8, W / 8]
        """
        c = self.out_dim
        out = []
        for u, v in zip(x, grid_sizes.tolist()):
            u = u[: math.prod(v)].view(*v, *self.patch_size, c)
            u = torch.einsum("fhwpqrc->cfphqwr", u)
            u = u.reshape(c, *[i * j for i, j in zip(v, self.patch_size)])
            out.append(u)
        return out

    def init_weights(self):
        r"""
        Initialize model parameters using Xavier initialization.
        """
        # basic init for all linear layers
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)

        # init embeddings
        nn.init.xavier_uniform_(self.patch_embedding.weight.flatten(1))
        for m in self.text_embedding.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, std=0.02)
        for m in self.time_embedding.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, std=0.02)

        # zero-init the output projection
        nn.init.zeros_(self.head.head.weight)
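

# Illustrative sketch (not part of the original file): build a deliberately tiny
# WanModel on CPU and check that unpatchify restores the expected latent shape.
# All sizes below are hypothetical; the full forward pass is not exercised here
# because flash_attention targets CUDA devices.
def _demo_tiny_model_unpatchify():
    tiny = WanModel(
        model_type="t2v",
        patch_size=(1, 2, 2),
        text_len=16,
        in_dim=16,
        dim=96,
        ffn_dim=192,
        freq_dim=64,
        text_dim=32,
        out_dim=16,
        num_heads=4,
        num_layers=1,
    )
    # one sample with an (F, H, W) = (2, 4, 4) patch grid -> 32 tokens
    grid_sizes = torch.tensor([[2, 4, 4]])
    tokens = torch.randn(1, 32, math.prod((1, 2, 2)) * 16)
    video = tiny.unpatchify(tokens, grid_sizes)[0]
    # each grid dim is multiplied back by its patch size: (2 * 1, 4 * 2, 4 * 2)
    assert video.shape == (16, 2, 8, 8)
    return video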