import torch
import torch.nn.functional as F


class LPLayerNorm(torch.nn.LayerNorm):
    """LayerNorm that runs in low precision under autocast.

    ``torch.nn.LayerNorm`` is normally upcast to fp32 inside autocast
    regions; this variant instead downcasts the input and the affine
    parameters to the autocast dtype and runs the kernel with autocast
    disabled, so the whole op stays in low precision.
    """

    def __init__(self,
                 normalized_shape,
                 eps=1e-05,
                 elementwise_affine=True,
                 device=None,
                 dtype=None):
        super().__init__(
            normalized_shape=normalized_shape,
            eps=eps,
            elementwise_affine=elementwise_affine,
            device=device,
            dtype=dtype,
        )

    def forward(self, x):
        module_device = x.device
        # Downcast the input and the optional affine parameters to the
        # active autocast dtype; these are no-ops when autocast is off.
        downcast_x = _cast_if_autocast_enabled(x)
        downcast_weight = _cast_if_autocast_enabled(
            self.weight) if self.weight is not None else self.weight
        downcast_bias = _cast_if_autocast_enabled(
            self.bias) if self.bias is not None else self.bias
        # Disable autocast so layer_norm executes directly in the
        # downcast dtype instead of being re-promoted to fp32.
        with torch.autocast(enabled=False, device_type=module_device.type):
            return F.layer_norm(downcast_x, self.normalized_shape,
                                downcast_weight, downcast_bias, self.eps)


def _cast_if_autocast_enabled(tensor):
    # If autocast is active, cast to the autocast dtype for the tensor's
    # device (e.g. fp16 or bf16); otherwise return the tensor unchanged.
    if torch.is_autocast_enabled():
        if tensor.device.type == 'cuda':
            dtype = torch.get_autocast_gpu_dtype()
        elif tensor.device.type == 'cpu':
            dtype = torch.get_autocast_cpu_dtype()
        else:
            raise NotImplementedError(
                f'Autocast cast not implemented for device type '
                f'{tensor.device.type!r}')
        return tensor.to(dtype=dtype)
    return tensor
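
# A minimal usage sketch (illustrative only, not part of the module):
# under CUDA autocast with fp16, LPLayerNorm keeps the normalization in
# half precision, whereas stock torch.nn.LayerNorm would compute in
# fp32. Assumes a CUDA device is available.
if __name__ == '__main__':
    if torch.cuda.is_available():
        ln = LPLayerNorm(64).cuda()
        x = torch.randn(8, 64, device='cuda')
        with torch.autocast(device_type='cuda', dtype=torch.float16):
            y = ln(x)
        print(y.dtype)  # expected: torch.float16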