import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
|
|
class TimeEmbedding(nn.Module):
    """Map a scalar timestep to a ``dim``-dimensional embedding via a small MLP."""

    def __init__(self, dim):
        super().__init__()
        # Two-layer MLP: 1 -> dim -> dim with a SiLU nonlinearity in between.
        layers = [nn.Linear(1, dim), nn.SiLU(), nn.Linear(dim, dim)]
        self.proj = nn.Sequential(*layers)

    def forward(self, t):
        # Expects t with a trailing feature dim of 1, e.g. shape (batch, 1).
        return self.proj(t)
|
|
class Conv3DBlock(nn.Module):
    """3x3x3 Conv3d -> BatchNorm3d -> SiLU with additive conditioning.

    The conditioning vector ``t_emb`` is projected to ``out_ch`` channels and
    added to the convolution output (broadcast over the spatial dims) before
    normalization and activation.
    """

    def __init__(self, in_ch, out_ch, time_dim):
        super().__init__()
        self.time_mlp = nn.Linear(time_dim, out_ch)
        self.conv = nn.Conv3d(in_ch, out_ch, kernel_size=3, padding=1)
        self.norm = nn.BatchNorm3d(out_ch)

    def forward(self, x, t_emb):
        # Per-channel bias, broadcast over (D, H, W) via three trailing axes.
        cond = self.time_mlp(t_emb)[..., None, None, None]
        h = self.conv(x) + cond
        return F.silu(self.norm(h))
|
|
class UNet3D(nn.Module):
    """Small text- and time-conditioned 3D U-Net.

    Encoder: three ``Conv3DBlock`` levels with 2x max-pooling between them.
    Decoder: nearest-neighbor upsampling with skip connections from the
    encoder, followed by a final 3x3x3 conv back to ``out_ch`` channels.

    NOTE: torch.cat requires ``import torch`` at module level; the file's
    original imports only bound ``torch.nn`` / ``torch.nn.functional``, so
    the first forward pass raised NameError.
    """

    def __init__(self, in_ch=3, out_ch=3, text_dim=768):
        super().__init__()
        # Both conditioning signals are projected into a shared 256-d space.
        self.time_embed = TimeEmbedding(256)
        self.text_proj = nn.Linear(text_dim, 256)

        # Encoder.
        self.down1 = Conv3DBlock(in_ch, 64, 256)
        self.down2 = Conv3DBlock(64, 128, 256)
        self.down3 = Conv3DBlock(128, 256, 256)

        # Decoder; input channels include the concatenated skip features.
        self.up1 = Conv3DBlock(256 + 128, 128, 256)
        self.up2 = Conv3DBlock(128 + 64, 64, 256)
        self.up3 = nn.Conv3d(64, out_ch, kernel_size=3, padding=1)

    def forward(self, x, t, text_emb):
        """Denoise ``x`` conditioned on timestep ``t`` and a text embedding.

        Args:
            x: input volume, shape (B, in_ch, D, H, W). The two pool/upsample
                stages require D, H, W to be divisible by 4 so the skip
                tensors line up for concatenation.
            t: timestep, shape (B, 1) — fed through ``TimeEmbedding``.
            text_emb: text conditioning, shape (B, text_dim).

        Returns:
            Tensor of shape (B, out_ch, D, H, W).
        """
        # Fuse time and text conditioning into a single 256-d vector.
        c_emb = self.time_embed(t) + self.text_proj(text_emb)

        # Encoder with 2x downsampling between levels.
        x1 = self.down1(x, c_emb)
        x2 = self.down2(F.max_pool3d(x1, 2), c_emb)
        x3 = self.down3(F.max_pool3d(x2, 2), c_emb)

        # Decoder: upsample (default nearest-neighbor) and concatenate the
        # matching encoder skip along the channel dim.
        h = F.interpolate(x3, scale_factor=2)
        h = self.up1(torch.cat([h, x2], dim=1), c_emb)
        h = F.interpolate(h, scale_factor=2)
        h = self.up2(torch.cat([h, x1], dim=1), c_emb)
        return self.up3(h)