| group (stringclasses, 5 values) | version (stringclasses, 1 value) | prompt (stringlengths, 48-35.8k) | code_str (stringclasses, 213 values) | target (stringlengths, 4-395) | right_context_few_lines (stringlengths, 1-358) | library (stringclasses, 1 value) | api (stringlengths, 6-61, ⌀) |
|---|---|---|---|---|---|---|---|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn.functional as F
from torch.distributed import all_reduce, get_rank, get_world_size, init_process_group
def compute_world_size() -> int:
rank = int(os.getenv("RANK")) # pyre-ignore[6]
world_size = int(os.getenv("WORLD_SIZE")) # pyre-ignore[6]
master_port = int(os.getenv("MASTER_PORT")) # pyre-ignore[6]
master_addr = os.getenv("MASTER_ADDR")
backend = "gloo"
print(f"initializing `{backend}` process group")
init_process_group(
backend=backend,
init_method=f"tcp://{master_addr}:{master_port}",
rank=rank,
world_size=world_size,
)
print("successfully initialized process group")
rank =
|
get
|
get_rank()
|
world_size = get_world_size()
t = F.one_hot(torch.tensor(rank), num_classes=world_size)
all_reduce(t)
|
torch
|
torch.distributed.get_rank
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn.functional as F
from torch.distributed import all_reduce, get_rank, get_world_size, init_process_group
def compute_world_size() -> int:
rank = int(os.getenv("RANK")) # pyre-ignore[6]
world_size = int(os.getenv("WORLD_SIZE")) # pyre-ignore[6]
master_port = int(os.getenv("MASTER_PORT")) # pyre-ignore[6]
master_addr = os.getenv("MASTER_ADDR")
backend = "gloo"
print(f"initializing `{backend}` process group")
init_process_group(
backend=backend,
init_method=f"tcp://{master_addr}:{master_port}",
rank=rank,
world_size=world_size,
)
print("successfully initialized process group")
rank = get_rank()
world_size =
|
get
|
get_world_size()
|
t = F.one_hot(torch.tensor(rank), num_classes=world_size)
all_reduce(t)
computed_world_size = int(torch.sum(t).item())
|
torch
|
torch.distributed.get_world_size
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn.functional as F
from torch.distributed import all_reduce, get_rank, get_world_size, init_process_group
def compute_world_size() -> int:
rank = int(os.getenv("RANK")) # pyre-ignore[6]
world_size = int(os.getenv("WORLD_SIZE")) # pyre-ignore[6]
master_port = int(os.getenv("MASTER_PORT")) # pyre-ignore[6]
master_addr = os.getenv("MASTER_ADDR")
backend = "gloo"
print(f"initializing `{backend}` process group")
init_process_group(
backend=backend,
init_method=f"tcp://{master_addr}:{master_port}",
rank=rank,
world_size=world_size,
)
print("successfully initialized process group")
rank = get_rank()
world_size = get_world_size()
t =
|
F
|
F.one_hot(torch.tensor(rank), num_classes=world_size)
|
all_reduce(t)
computed_world_size = int(torch.sum(t).item())
print(
f"rank: {rank}, actual world_size: {world_size}, computed world_size: {computed_world_size}"
|
torch
|
torch.nn.functional.one_hot
|
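The three rows above all draw on the same `compute_world_size` prompt, where each rank one-hot encodes its rank and an `all_reduce` sum recovers the world size. A minimal single-process sketch of that arithmetic (the world size of 4 and the loop over ranks are assumptions; a real run would call `all_reduce` on an initialized process group):

```python
import torch
import torch.nn.functional as F

world_size = 4  # assumed world size for illustration
# Each rank would contribute F.one_hot(torch.tensor(rank), num_classes=world_size);
# summing the stacked vectors mimics what all_reduce(SUM) produces on every rank.
reduced = torch.stack(
    [F.one_hot(torch.tensor(r), num_classes=world_size) for r in range(world_size)]
).sum(dim=0)
computed_world_size = int(torch.sum(reduced).item())
assert computed_world_size == world_size  # 4
```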
torch_direct_api
|
v_1_10_0
|
import torch
import torch.distributed as dist
from torch.distributed.distributed_c10d import _get_default_group
def local_device() -> torch.device:
"""
Returns the device that the current process should be using for models and tensors
based on the default process group.
.. note:: If the process group has not been initialized
then this method returns ``cuda`` if GPU is available on the machine, and ``cpu`` otherwise.
Returns ``cuda:$LOCAL_RANK`` if the default process group's backend is ``nccl``, otherwise ``cpu``.
"""
if dist.is_initialized():
default_pg =
|
_get_default_group()
|
return (
local_cuda_device()
if default_pg.options.backend == "nccl"
else torch.device("cpu")
|
torch
|
torch.distributed.distributed_c10d._get_default_group
|
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.jit
from torch.nn import functional as F
class TinyImageNetModel(pl.LightningModule):
"""
A very simple linear model for the tiny image net dataset.
"""
def __init__(
self, layer_sizes: Optional[List[int]] = None, lr: Optional[float] = None
) -> None:
super().__init__()
if not layer_sizes:
layer_sizes = [1, 1, 1, 1]
self.lr: float = lr or 0.001
m = ResNet(BasicBlock, layer_sizes)
m.avgpool =
|
torch
|
torch.nn.AdaptiveAvgPool2d(1)
|
m.fc.out_features = 200
self.model: ResNet = m
self.train_acc = Accuracy()
|
torch
|
torch.nn.AdaptiveAvgPool2d
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.jit
from torch.nn import functional as F
def export_inference_model(
model: TinyImageNetModel, out_path: str, tmpdir: str
) -> None:
"""
export_inference_model uses TorchScript JIT to serialize the
TinyImageNetModel into a standalone file that can be used during inference.
TorchServe can also handle interpreted models with just the model.py file if
your model can't be JITed.
"""
print("exporting inference model")
jit_path = os.path.join(tmpdir, "model_jit.pt")
jitted =
|
torch
|
torch.jit.script(model)
|
print(f"saving JIT model to {jit_path}")
torch.jit.save(jitted, jit_path)
model_name = "tiny_image_net"
|
torch
|
torch.jit.script
|
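As an aside to the `torch.jit.script` row above: a minimal sketch of scripting, saving, and reloading a module, with a toy `nn.Sequential` standing in for `TinyImageNetModel` and a throwaway file name (both are assumptions, not part of the dataset).

```python
import torch
import torch.nn as nn

toy = nn.Sequential(nn.Linear(8, 4), nn.ReLU())  # hypothetical stand-in model
scripted = torch.jit.script(toy)                 # compile to TorchScript
torch.jit.save(scripted, "model_jit.pt")         # serialize to a standalone file
reloaded = torch.jit.load("model_jit.pt")        # no Python class definition needed
print(reloaded(torch.randn(1, 8)).shape)         # torch.Size([1, 4])
```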
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 =
|
nn
|
nn.Conv2d(1, 32, 3, 1)
|
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
|
torch
|
torch.nn.Conv2d
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 =
|
nn
|
nn.Dropout(0.25)
|
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
|
torch
|
torch.nn.Dropout
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 =
|
nn
|
nn.Linear(9216, 128)
|
self.fc2 = nn.Linear(128, 10)
def forward(self, x: Tensor) -> Tensor:
x = self.conv1(x)
|
torch
|
torch.nn.Linear
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x =
|
F
|
F.relu(x)
|
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
|
torch
|
torch.nn.functional.relu
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x =
|
F
|
F.max_pool2d(x, 2)
|
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
|
torch
|
torch.nn.functional.max_pool2d
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x =
|
torch
|
torch.flatten(x, 1)
|
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
|
torch
|
torch.flatten
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output =
|
F
|
F.log_softmax(x, dim=1)
|
return output
def train(
|
torch
|
torch.nn.functional.log_softmax
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
def train(
args: Namespace,
model: nn.Module,
device: torch.device,
train_loader: torch.utils.data.DataLoader[VisionDataset],
optimizer: optim.Optimizer,
epoch: int,
writer: Optional[SummaryWriter],
) -> None:
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss =
|
F
|
F.nll_loss(output, target)
|
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print(
|
torch
|
torch.nn.functional.nll_loss
|
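The two rows above pair `F.log_softmax` in `forward` with `F.nll_loss` in the training loop. A small sketch (the random logits and targets are assumptions) showing that this pairing is numerically equivalent to `F.cross_entropy` on the raw logits:

```python
import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)               # assumed batch of 4 samples, 10 classes
targets = torch.randint(0, 10, (4,))
loss_nll = F.nll_loss(F.log_softmax(logits, dim=1), targets)
loss_ce = F.cross_entropy(logits, targets)
print(torch.allclose(loss_nll, loss_ce))  # True
```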
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
def main() -> None:
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=1000,
metavar="N",
help="input batch size for testing (default: 1000)",
)
parser.add_argument(
"--epochs",
type=int,
default=14,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"--lr",
type=float,
default=1.0,
metavar="LR",
help="learning rate (default: 1.0)",
)
parser.add_argument(
"--gamma",
type=float,
default=0.7,
metavar="M",
help="Learning rate step gamma (default: 0.7)",
)
parser.add_argument(
"--no-cuda", action="store_true", default=False, help="disables CUDA training"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--save-model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--tb-log-path",
type=str,
default=None,
help="Tensorboard log path",
)
parser.add_argument(
"--data-path",
type=str,
default="../data",
help="Model data storge path",
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device =
|
torch
|
torch.device("cuda" if use_cuda else "cpu")
|
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.test_batch_size}
if use_cuda:
|
torch
|
torch.device
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
def main() -> None:
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=1000,
metavar="N",
help="input batch size for testing (default: 1000)",
)
parser.add_argument(
"--epochs",
type=int,
default=14,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"--lr",
type=float,
default=1.0,
metavar="LR",
help="learning rate (default: 1.0)",
)
parser.add_argument(
"--gamma",
type=float,
default=0.7,
metavar="M",
help="Learning rate step gamma (default: 0.7)",
)
parser.add_argument(
"--no-cuda", action="store_true", default=False, help="disables CUDA training"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--save-model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--tb-log-path",
type=str,
default=None,
help="Tensorboard log path",
)
parser.add_argument(
"--data-path",
type=str,
default="../data",
help="Model data storge path",
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.test_batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
data_path = args.data_path
if not os.path.exists(data_path):
os.makedirs(data_path)
dataset1 = datasets.MNIST(data_path, train=True, download=True, transform=transform)
dataset2 = datasets.MNIST(data_path, train=False, transform=transform)
train_loader =
|
torch
|
torch.utils.data.DataLoader(dataset1, **train_kwargs)
|
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
|
torch
|
torch.utils.data.DataLoader
|
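As an aside to the `torch.utils.data.DataLoader` row above: a minimal sketch with an in-memory `TensorDataset` standing in for MNIST (the toy dataset, shapes, and batch size are assumptions).

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

ds = TensorDataset(torch.randn(10, 3), torch.arange(10))  # toy stand-in for MNIST
loader = DataLoader(ds, batch_size=4, shuffle=True)
for data, target in loader:
    print(data.shape, target.shape)  # batches of up to 4 samples
```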
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
def main() -> None:
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=1000,
metavar="N",
help="input batch size for testing (default: 1000)",
)
parser.add_argument(
"--epochs",
type=int,
default=14,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"--lr",
type=float,
default=1.0,
metavar="LR",
help="learning rate (default: 1.0)",
)
parser.add_argument(
"--gamma",
type=float,
default=0.7,
metavar="M",
help="Learning rate step gamma (default: 0.7)",
)
parser.add_argument(
"--no-cuda", action="store_true", default=False, help="disables CUDA training"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--save-model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--tb-log-path",
type=str,
default=None,
help="Tensorboard log path",
)
parser.add_argument(
"--data-path",
type=str,
default="../data",
help="Model data storge path",
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.test_batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
data_path = args.data_path
if not os.path.exists(data_path):
os.makedirs(data_path)
dataset1 = datasets.MNIST(data_path, train=True, download=True, transform=transform)
dataset2 = datasets.MNIST(data_path, train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer =
|
optim
|
optim.Adadelta(model.parameters(), lr=args.lr)
|
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
|
torch
|
torch.optim.Adadelta
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
def main() -> None:
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=1000,
metavar="N",
help="input batch size for testing (default: 1000)",
)
parser.add_argument(
"--epochs",
type=int,
default=14,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"--lr",
type=float,
default=1.0,
metavar="LR",
help="learning rate (default: 1.0)",
)
parser.add_argument(
"--gamma",
type=float,
default=0.7,
metavar="M",
help="Learning rate step gamma (default: 0.7)",
)
parser.add_argument(
"--no-cuda", action="store_true", default=False, help="disables CUDA training"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--save-model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--tb-log-path",
type=str,
default=None,
help="Tensorboard log path",
)
parser.add_argument(
"--data-path",
type=str,
default="../data",
help="Model data storge path",
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.test_batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
data_path = args.data_path
if not os.path.exists(data_path):
os.makedirs(data_path)
dataset1 = datasets.MNIST(data_path, train=True, download=True, transform=transform)
dataset2 = datasets.MNIST(data_path, train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler =
|
StepLR
|
StepLR(optimizer, step_size=1, gamma=args.gamma)
|
app_run = tracker.app_run_from_env()
|
torch
|
torch.optim.lr_scheduler.StepLR
|
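The last two rows pair `optim.Adadelta` with a `StepLR` schedule. A minimal sketch of how that pairing decays the learning rate (the single throwaway parameter and the three-epoch loop are assumptions):

```python
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR

param = torch.nn.Parameter(torch.zeros(1))            # throwaway parameter
optimizer = optim.Adadelta([param], lr=1.0)
scheduler = StepLR(optimizer, step_size=1, gamma=0.7)
for epoch in range(3):
    optimizer.step()        # normally preceded by loss.backward()
    scheduler.step()        # multiplies lr by gamma every step_size epochs
    print(epoch, scheduler.get_last_lr())  # [0.7], then [0.49], then [0.343]
```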
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
def main() -> None:
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=1000,
metavar="N",
help="input batch size for testing (default: 1000)",
)
parser.add_argument(
"--epochs",
type=int,
default=14,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"--lr",
type=float,
default=1.0,
metavar="LR",
help="learning rate (default: 1.0)",
)
parser.add_argument(
"--gamma",
type=float,
default=0.7,
metavar="M",
help="Learning rate step gamma (default: 0.7)",
)
parser.add_argument(
"--no-cuda", action="store_true", default=False, help="disables CUDA training"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--save-model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--tb-log-path",
type=str,
default=None,
help="Tensorboard log path",
)
parser.add_argument(
"--data-path",
type=str,
default="../data",
help="Model data storge path",
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.test_batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
data_path = args.data_path
if not os.path.exists(data_path):
os.makedirs(data_path)
dataset1 = datasets.MNIST(data_path, train=True, download=True, transform=transform)
dataset2 = datasets.MNIST(data_path, train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
app_run = tracker.app_run_from_env()
app_run.add_metadata(**train_kwargs)
app_run.add_metadata(lr=args.lr, gamma=args.gamma)
app_run.add_metadata(data_path=data_path)
writer = None
if args.tb_log_path:
writer =
|
SummaryWriter
|
SummaryWriter(log_dir=args.tb_log_path)
|
app_run.add_artifact("tensorboard", args.tb_log_path)
for epoch in range(1, args.epochs + 1):
|
torch
|
torch.utils.tensorboard.SummaryWriter
|
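As an aside to the `SummaryWriter` row above: a minimal logging sketch, assuming the `tensorboard` package is installed and using a throwaway log directory (the directory and the scalar values are assumptions).

```python
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir="/tmp/tb_demo")  # hypothetical log path
for step in range(3):
    writer.add_scalar("train/loss", 1.0 / (step + 1), global_step=step)
writer.close()  # flush event files for TensorBoard to read
```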
torch_direct_api
|
v_1_10_0
|
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
class TestMinifier(TestCase):
def test_has_mul_minifier(self):
def failing_f(x, y):
y = y / 3
x = x + 3
x = x * y
return x + y
inps = [
|
torch
|
torch.randn(3)
|
, torch.randn(3)]
failing_f = make_fx(failing_f)(*inps)
def pass_checker(fx_g, inps):
return (torch.ops.aten.mul in set([i.target for i in fx_g.graph.nodes]))
|
torch
|
torch.randn
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn as nn
import torch.utils._pytree as pytree
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
class TestAOTAutograd(TestCase):
def verify_aot_autograd(self, f, inp):
if isinstance(f, nn.Module):
compiled_f = aot_module(f, nop)
else:
compiled_f = aot_function(f, nop)
ref_out, ref_grad = _outs_and_grads(f, inp)
test_out, test_grad = _outs_and_grads(compiled_f, inp)
self.assertEqual(ref_out, test_out)
self.assertEqual(ref_grad, test_grad)
def test_single_output(self):
def f(a, b):
return a + b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output(self):
def f(a, b):
return a + b, a - b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output_list(self):
def f(a, b):
return [a + b, a - b]
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_no_grad_input_output(self):
def f(a, b):
return a.cos(), b.cos(), a * b
inp_thunks = [lambda: torch.randn(5, requires_grad=True), lambda: torch.randn(5, requires_grad=False)]
for inps in itertools.product(inp_thunks, repeat=2):
inps = [i() for i in inps]
self.verify_aot_autograd(f, inps)
def test_inner_grad(self):
def foo(x):
y =
|
torch
|
torch.exp(x)
|
z = torch.autograd.grad(y, x)
return z
inps = [torch.randn((), requires_grad=True)]
self.verify_aot_autograd(foo, inps)
|
torch
|
torch.exp
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn as nn
import torch.utils._pytree as pytree
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
class TestAOTAutograd(TestCase):
def verify_aot_autograd(self, f, inp):
if isinstance(f, nn.Module):
compiled_f = aot_module(f, nop)
else:
compiled_f = aot_function(f, nop)
ref_out, ref_grad = _outs_and_grads(f, inp)
test_out, test_grad = _outs_and_grads(compiled_f, inp)
self.assertEqual(ref_out, test_out)
self.assertEqual(ref_grad, test_grad)
def test_single_output(self):
def f(a, b):
return a + b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output(self):
def f(a, b):
return a + b, a - b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output_list(self):
def f(a, b):
return [a + b, a - b]
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_no_grad_input_output(self):
def f(a, b):
return a.cos(), b.cos(), a * b
inp_thunks = [lambda: torch.randn(5, requires_grad=True), lambda: torch.randn(5, requires_grad=False)]
for inps in itertools.product(inp_thunks, repeat=2):
inps = [i() for i in inps]
self.verify_aot_autograd(f, inps)
def test_inner_grad(self):
def foo(x):
y = torch.exp(x)
z =
|
torch
|
torch.autograd.grad(y, x)
|
return z
inps = [torch.randn((), requires_grad=True)]
self.verify_aot_autograd(foo, inps)
|
torch
|
torch.autograd.grad
|
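The two rows above target `torch.exp` and `torch.autograd.grad` inside `test_inner_grad`. A standalone sketch of the same derivative (the scalar input value is an assumption): `torch.autograd.grad` returns the gradients as a tuple without populating `.grad`.

```python
import torch

x = torch.tensor(2.0, requires_grad=True)  # assumed scalar input
y = torch.exp(x)
(dy_dx,) = torch.autograd.grad(y, x)        # returns a tuple of gradients
print(torch.allclose(dy_dx, y.detach()))    # True: d/dx exp(x) = exp(x)
```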
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn as nn
import torch.utils._pytree as pytree
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
class TestAOTAutograd(TestCase):
def verify_aot_autograd(self, f, inp):
if isinstance(f, nn.Module):
compiled_f = aot_module(f, nop)
else:
compiled_f = aot_function(f, nop)
ref_out, ref_grad = _outs_and_grads(f, inp)
test_out, test_grad = _outs_and_grads(compiled_f, inp)
self.assertEqual(ref_out, test_out)
self.assertEqual(ref_grad, test_grad)
def test_single_output(self):
def f(a, b):
return a + b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output(self):
def f(a, b):
return a + b, a - b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output_list(self):
def f(a, b):
return [a + b, a - b]
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_no_grad_input_output(self):
def f(a, b):
return a.cos(), b.cos(), a * b
inp_thunks = [lambda: torch.randn(5, requires_grad=True), lambda: torch.randn(5, requires_grad=False)]
for inps in itertools.product(inp_thunks, repeat=2):
inps = [i() for i in inps]
self.verify_aot_autograd(f, inps)
def test_inner_grad(self):
def foo(x):
y = torch.exp(x)
z = torch.autograd.grad(y, x)
return z
inps = [torch.randn((), requires_grad=True)]
self.verify_aot_autograd(foo, inps)
def test_grad_context(self):
def foo(x):
return x * 2
inps = [torch.randn((), requires_grad=True)]
graph_size = None
def assert_graph_empty(fx_g, _):
nonlocal graph_size
graph_size = len(fx_g.graph.nodes)
return fx_g
start_recompilations = num_of_recompilations()
f = aot_function(foo, nop, assert_graph_empty)
with torch.set_grad_enabled(False):
f(*inps)
self.assertEqual(graph_size, 2)
with torch.set_grad_enabled(True):
f(*inps)
self.assertTrue(graph_size > 2)
self.assertEqual(num_of_recompilations() - start_recompilations, 2)
def test_output_dict(self):
def f(x):
return {'a': x, 'b': x}
inp = [torch.randn(3, 3, requires_grad=True)]
self.verify_aot_autograd(f, inp)
def f(x, y):
return {'a': x, 'b': y + x}
inp = [torch.randn(3, requires_grad=True), torch.randn(3)]
self.verify_aot_autograd(f, inp)
def f(x):
new_d = {}
for k in x:
new_d[k] = x[k] * 2
return new_d
inp = [{'a': torch.randn(3, requires_grad=True), 'b': torch.randn(3, requires_grad=True)}]
self.verify_aot_autograd(f, inp)
def test_module(self):
mod =
|
nn
|
nn.Sequential(nn.Linear(32, 32), nn.ReLU())
|
compiled_mod = compiled_module(mod, nop, nop)
inp = torch.randn(32, 32)
ref_out = mod(inp)
ref_out.sum().backward()
|
torch
|
torch.nn.Sequential
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn as nn
import torch.utils._pytree as pytree
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
class TestAOTAutograd(TestCase):
def verify_aot_autograd(self, f, inp):
if isinstance(f, nn.Module):
compiled_f = aot_module(f, nop)
else:
compiled_f = aot_function(f, nop)
ref_out, ref_grad = _outs_and_grads(f, inp)
test_out, test_grad = _outs_and_grads(compiled_f, inp)
self.assertEqual(ref_out, test_out)
self.assertEqual(ref_grad, test_grad)
def test_single_output(self):
def f(a, b):
return a + b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output(self):
def f(a, b):
return a + b, a - b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output_list(self):
def f(a, b):
return [a + b, a - b]
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_no_grad_input_output(self):
def f(a, b):
return a.cos(), b.cos(), a * b
inp_thunks = [lambda: torch.randn(5, requires_grad=True), lambda: torch.randn(5, requires_grad=False)]
for inps in itertools.product(inp_thunks, repeat=2):
inps = [i() for i in inps]
self.verify_aot_autograd(f, inps)
def test_inner_grad(self):
def foo(x):
y = torch.exp(x)
z = torch.autograd.grad(y, x)
return z
inps = [torch.randn((), requires_grad=True)]
self.verify_aot_autograd(foo, inps)
def test_grad_context(self):
def foo(x):
return x * 2
inps = [torch.randn((), requires_grad=True)]
graph_size = None
def assert_graph_empty(fx_g, _):
nonlocal graph_size
graph_size = len(fx_g.graph.nodes)
return fx_g
start_recompilations = num_of_recompilations()
f = aot_function(foo, nop, assert_graph_empty)
with torch.set_grad_enabled(False):
f(*inps)
self.assertEqual(graph_size, 2)
with torch.set_grad_enabled(True):
f(*inps)
self.assertTrue(graph_size > 2)
self.assertEqual(num_of_recompilations() - start_recompilations, 2)
def test_output_dict(self):
def f(x):
return {'a': x, 'b': x}
inp = [torch.randn(3, 3, requires_grad=True)]
self.verify_aot_autograd(f, inp)
def f(x, y):
return {'a': x, 'b': y + x}
inp = [torch.randn(3, requires_grad=True), torch.randn(3)]
self.verify_aot_autograd(f, inp)
def f(x):
new_d = {}
for k in x:
new_d[k] = x[k] * 2
return new_d
inp = [{'a': torch.randn(3, requires_grad=True), 'b': torch.randn(3, requires_grad=True)}]
self.verify_aot_autograd(f, inp)
def test_module(self):
mod = nn.Sequential(nn.Linear(32, 32), nn.ReLU())
compiled_mod = compiled_module(mod, nop, nop)
inp = torch.randn(32, 32)
ref_out = mod(inp)
ref_out.sum().backward()
ref_grads = sorted([(name, p.grad) for name, p in mod.named_parameters()])
out = compiled_mod(inp)
out.sum().backward()
grads = sorted([(name, p.grad) for name, p in mod.named_parameters()])
self.assertEqual((out, grads), (ref_out, ref_grads))
def test_batchnorm(self):
mod = compiled_module(nn.BatchNorm2d(4), nop, nop)
x =
|
torch
|
torch.ones(1, 4, 2, 2)
|
mod(x).sum().backward()
class TestEagerFusionOpInfo(TestCase):
|
torch
|
torch.ones
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn as nn
import torch.utils._pytree as pytree
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
class TestEagerFusionOpInfo(TestCase):
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestEagerFusionOpInfo', 'test_aot_autograd_exhaustive', {
xfail('__rmatmul__'),
xfail('linalg.cholesky'),
xfail('matmul'),
skip('msort'),
xfail('nn.functional.linear'),
xfail('nn.functional.dropout'),
xfail('polar'),
xfail('special.zeta', 'grad'),
xfail('to_sparse'),
xfail('addcdiv'),
xfail('cholesky'),
xfail('cumulative_trapezoid'),
xfail('diag_embed'),
xfail('linalg.householder_product'),
xfail('logit'),
xfail('matrix_exp'),
xfail('trapezoid'),
xfail('trapz'),
xfail('trace'),
skip('nn.functional.binary_cross_entropy_with_logits') # seems to fail sometimes?
})
def test_aot_autograd_exhaustive(self, device, dtype, op):
def f(args, kwargs):
return op.op(*args, **kwargs)
if not op.supports_autograd:
return
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=True)
for sample_input in sample_inputs_itr:
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
if not all([isinstance(i, torch.Tensor) and i.dtype == torch.float for i in args]):
self.skipTest("not all inputs are float tensors")
if not all([isinstance(i, torch.Tensor) and i.dtype == torch.float for i in kwargs.values()]):
self.skipTest("not all inputs are float tensors")
continue
t = f(args, kwargs)
if isinstance(t, tuple):
self.skipTest("output is a tuple")
continue
def reset_grads():
def f(x):
x.grad = None
pytree.tree_map(f, args)
def get_grads(args):
return pytree.tree_map(lambda x: x.grad, args)
compiled_f = compiled_function(f, nop, nop)
reset_grads()
compiled_f(args, kwargs).sum().backward()
compiled_grad = get_grads(args)
reset_grads()
f(args, kwargs).sum().backward()
orig_grad = get_grads(args)
self.assertEqual(orig_grad, compiled_grad)
def create_new_arg(x):
return x.detach().uniform_(0, 1).requires_grad_(x.requires_grad)
args =
|
pytree
|
pytree.tree_map(create_new_arg, args)
|
reset_grads()
compiled_f(args, kwargs).sum().backward()
compiled_grad = get_grads(args)
|
torch
|
torch.utils._pytree.tree_map
|
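As an aside to the `torch.utils._pytree.tree_map` row above (a private torch utility): a minimal sketch applying a function to every tensor leaf of a nested structure, with the toy list/dict structure being an assumption.

```python
import torch
import torch.utils._pytree as pytree

args = [torch.zeros(2), {"a": torch.ones(2)}]     # toy nested structure
doubled = pytree.tree_map(lambda t: t * 2, args)  # applied to every leaf
print(doubled[1]["a"])                            # tensor([2., 2.])
```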
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn as nn
import torch.utils._pytree as pytree
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
class TestPartitioning(TestCase):
@unittest.skipIf(not USE_NETWORKX, "networkx not available")
def test_recompute_partitioning(self):
def fn(a, b):
return torch.sin(torch.sin(a)) + b
ref_a =
|
torch
|
torch.rand(10, 10, requires_grad=True)
|
ref_b = torch.rand(10, 10, requires_grad=True)
ref = fn(ref_a, ref_b)
ref.sum().backward()
|
torch
|
torch.rand
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets =
|
torch
|
torch.randint(0, C, (N,), device=device)
|
def foo(y, targets):
return F.cross_entropy(y, targets)
|
torch
|
torch.randint
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x =
|
torch
|
torch.tensor([1., 2., 3.], device=device)
|
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
|
torch
|
torch.tensor
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 =
|
torch
|
torch.cos(y)
|
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
|
torch
|
torch.cos
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 = torch.cos(y)
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
self.assertEqual(grads[0], -x.sin())
self.assertEqual(grads[1], -y.sin())
def test_view_inplace_simple(self, device):
def foo(x):
x = x.clone()
x.view([]).sin_()
return x
x = torch.randn([], requires_grad=True, device=device)
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=-3)(x, y)
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=2)(x, y)
with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
grad(torch.mul, argnums=[0])(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be int'):
grad(torch.mul, argnums=('0',))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, 0))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=0)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_out_of_order_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gy, gx = grad(torch.mul, argnums=(1, 0))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_negative_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=-2)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_grad_pytree_inputs(self, device):
x = torch.randn([], device=device)
def f(a, b):
x, y = a
return 1 * x + 2 * y + 3 * b['foo']
args = ((x, x), {'foo': x})
gx, gy = grad(f)(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), = grad(f, argnums=(0,))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
self.assertEqual(gz['foo'], torch.tensor(3., device=device))
def test_grad_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
def f(t):
y = t.sin()
return y.sum(), t.cos()
out, aux = grad(f, has_aux=True)(x)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.cos())
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, aux = grad(f, has_aux=True)(x)
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
return (x['a']**2.0).sum()
inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
grads = grad(f)(inps)
self.assertNotEqual(grads['a'].sum(), 0.0)
self.assertEqual(grads['b'].sum(), 0.0)
def test_unrelated_grad(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
def unrelated(x):
return y
result = grad(unrelated)(x)
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(x):
return y
out, vjp_fn = vjp(unrelated, x)
result = vjp_fn(v)
expected = (
|
torch
|
torch.zeros_like(x)
|
,)
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
w = torch.tensor(3., device=device)
|
torch
|
torch.zeros_like
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 = torch.cos(y)
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
self.assertEqual(grads[0], -x.sin())
self.assertEqual(grads[1], -y.sin())
def test_view_inplace_simple(self, device):
def foo(x):
x = x.clone()
x.view([]).sin_()
return x
x = torch.randn([], requires_grad=True, device=device)
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=-3)(x, y)
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=2)(x, y)
with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
grad(torch.mul, argnums=[0])(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be int'):
grad(torch.mul, argnums=('0',))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, 0))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=0)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_out_of_order_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gy, gx = grad(torch.mul, argnums=(1, 0))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_negative_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=-2)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_grad_pytree_inputs(self, device):
x = torch.randn([], device=device)
def f(a, b):
x, y = a
return 1 * x + 2 * y + 3 * b['foo']
args = ((x, x), {'foo': x})
gx, gy = grad(f)(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), = grad(f, argnums=(0,))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
self.assertEqual(gz['foo'], torch.tensor(3., device=device))
def test_grad_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
def f(t):
y = t.sin()
return y.sum(), t.cos()
out, aux = grad(f, has_aux=True)(x)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.cos())
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, aux = grad(f, has_aux=True)(x)
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
return (x['a']**2.0).sum()
inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
grads = grad(f)(inps)
self.assertNotEqual(grads['a'].sum(), 0.0)
self.assertEqual(grads['b'].sum(), 0.0)
def test_unrelated_grad(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
def unrelated(x):
return y
result = grad(unrelated)(x)
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(x):
return y
out, vjp_fn = vjp(unrelated, x)
result = vjp_fn(v)
expected = (torch.zeros_like(x),)
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
w = torch.tensor(3., device=device)
x = torch.tensor(4., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(w, x):
return y, y, x
out, vjp_fn = vjp(unrelated, w, x)
result = vjp_fn((v, v, v))
expected = (torch.zeros_like(x), torch.ones_like(x))
self.assertEqual(result, expected)
@onlyCPU
def test_unrelated_hessian(self, device):
N = 5
M = 3
W = torch.randn(N, M, device=device)
def f(x):
return W @ x
x = torch.randn(M)
result = jacrev(jacrev(f))(x)
expected =
|
torch
|
torch.zeros(N, M, M, device=device)
|
self.assertEqual(result, expected)
def test_vjp_pytree_input(self, device):
def f(x):
|
torch
|
torch.zeros
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 = torch.cos(y)
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
self.assertEqual(grads[0], -x.sin())
self.assertEqual(grads[1], -y.sin())
def test_view_inplace_simple(self, device):
def foo(x):
x = x.clone()
x.view([]).sin_()
return x
x = torch.randn([], requires_grad=True, device=device)
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=-3)(x, y)
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=2)(x, y)
with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
grad(torch.mul, argnums=[0])(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be int'):
grad(torch.mul, argnums=('0',))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, 0))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=0)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_out_of_order_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gy, gx = grad(torch.mul, argnums=(1, 0))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_negative_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=-2)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_grad_pytree_inputs(self, device):
x = torch.randn([], device=device)
def f(a, b):
x, y = a
return 1 * x + 2 * y + 3 * b['foo']
args = ((x, x), {'foo': x})
gx, gy = grad(f)(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), = grad(f, argnums=(0,))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
self.assertEqual(gz['foo'], torch.tensor(3., device=device))
def test_grad_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
def f(t):
y = t.sin()
return y.sum(), t.cos()
out, aux = grad(f, has_aux=True)(x)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.cos())
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, aux = grad(f, has_aux=True)(x)
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
return (x['a']**2.0).sum()
inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
grads = grad(f)(inps)
self.assertNotEqual(grads['a'].sum(), 0.0)
self.assertEqual(grads['b'].sum(), 0.0)
def test_unrelated_grad(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
def unrelated(x):
return y
result = grad(unrelated)(x)
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(x):
return y
out, vjp_fn = vjp(unrelated, x)
result = vjp_fn(v)
expected = (torch.zeros_like(x),)
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
w = torch.tensor(3., device=device)
x = torch.tensor(4., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(w, x):
return y, y, x
out, vjp_fn = vjp(unrelated, w, x)
result = vjp_fn((v, v, v))
expected = (torch.zeros_like(x), torch.ones_like(x))
self.assertEqual(result, expected)
@onlyCPU
def test_unrelated_hessian(self, device):
N = 5
M = 3
W = torch.randn(N, M, device=device)
def f(x):
return W @ x
x = torch.randn(M)
result = jacrev(jacrev(f))(x)
expected = torch.zeros(N, M, M, device=device)
self.assertEqual(result, expected)
def test_vjp_pytree_input(self, device):
def f(x):
return x[0] * x[1][0]
x = torch.randn([], device=device)
v = torch.randn([], device=device)
out, vjp_fn = vjp(f, (x, (x, x)))
self.assertEqual(out, x * x)
result = vjp_fn(v)
self.assertEqual(result, ((x * v, (x * v, 0.)),))
def test_vjp_pytree_output(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
result, = vjp_fn((v1, (v2, v3)))
self.assertEqual(result, v1 + v2 + v3)
def test_vjp_outputs_can_any_pytree(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): Expected f to be a function that has non-empty output"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): expected f\(\*primals\) to return only tensors"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
output, vjp_fn = vjp(lambda x: [x, x.sum()], x)
vjp_out, = vjp_fn([t, t.sum()])
assert isinstance(output, list) and len(output) == 2
assert isinstance(vjp_out, torch.Tensor)
output, vjp_fn = vjp(lambda x: {"x": x, "xsum": x.sum()}, x)
vjp_out, = vjp_fn({"x": t, "xsum": t.sum()})
assert isinstance(output, dict) and len(output) == 2 and "xsum" in output
assert isinstance(vjp_out, torch.Tensor)
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
output, vjp_fn = vjp(composite_output, x)
vjp_out, = vjp_fn([(t.sum(), {"a": t, "out": [t, t.sum()]}), ])
assert isinstance(output, list)
assert isinstance(output[0], tuple) and isinstance(output[0][1], dict)
assert isinstance(vjp_out, torch.Tensor)
def test_vjp_pytree_error(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
with self.assertRaisesRegex(RuntimeError, 'Expected pytree structure'):
result, = vjp_fn(((v1, (v2, v3)),))
def test_vjp_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: [t, t], x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: (t, t + 2, t + 3), x, has_aux=True)
def f(t):
y = t.sin()
return y, t.cos()
out, vjp_fn, aux = vjp(f, x, has_aux=True)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.sin())
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
def test_vjp_aux_pytree(self, device):
def f(x):
y = x.sin()
return y, {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, vjp_fn, aux = vjp(f, x, has_aux=True)
expected_out, expected_aux = f(x)
self.assertEqual(out, expected_out)
self.assertEqual(aux, expected_aux)
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, aux), x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, [x, aux]), x, has_aux=True)
def test_functional_init(self, device):
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 =
|
nn
|
nn.Linear(2, self.hidden_dim)
|
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
|
torch
|
torch.nn.Linear
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 = torch.cos(y)
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
self.assertEqual(grads[0], -x.sin())
self.assertEqual(grads[1], -y.sin())
def test_view_inplace_simple(self, device):
def foo(x):
x = x.clone()
x.view([]).sin_()
return x
x = torch.randn([], requires_grad=True, device=device)
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=-3)(x, y)
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=2)(x, y)
with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
grad(torch.mul, argnums=[0])(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be int'):
grad(torch.mul, argnums=('0',))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, 0))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=0)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_out_of_order_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gy, gx = grad(torch.mul, argnums=(1, 0))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_negative_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=-2)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_grad_pytree_inputs(self, device):
x = torch.randn([], device=device)
def f(a, b):
x, y = a
return 1 * x + 2 * y + 3 * b['foo']
args = ((x, x), {'foo': x})
gx, gy = grad(f)(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), = grad(f, argnums=(0,))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
self.assertEqual(gz['foo'], torch.tensor(3., device=device))
def test_grad_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
def f(t):
y = t.sin()
return y.sum(), t.cos()
out, aux = grad(f, has_aux=True)(x)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.cos())
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, aux = grad(f, has_aux=True)(x)
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
return (x['a']**2.0).sum()
inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
grads = grad(f)(inps)
self.assertNotEqual(grads['a'].sum(), 0.0)
self.assertEqual(grads['b'].sum(), 0.0)
def test_unrelated_grad(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
def unrelated(x):
return y
result = grad(unrelated)(x)
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(x):
return y
out, vjp_fn = vjp(unrelated, x)
result = vjp_fn(v)
expected = (torch.zeros_like(x),)
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
w = torch.tensor(3., device=device)
x = torch.tensor(4., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(w, x):
return y, y, x
out, vjp_fn = vjp(unrelated, w, x)
result = vjp_fn((v, v, v))
expected = (torch.zeros_like(x), torch.ones_like(x))
self.assertEqual(result, expected)
@onlyCPU
def test_unrelated_hessian(self, device):
N = 5
M = 3
W = torch.randn(N, M, device=device)
def f(x):
return W @ x
x = torch.randn(M)
result = jacrev(jacrev(f))(x)
expected = torch.zeros(N, M, M, device=device)
self.assertEqual(result, expected)
def test_vjp_pytree_input(self, device):
def f(x):
return x[0] * x[1][0]
x = torch.randn([], device=device)
v = torch.randn([], device=device)
out, vjp_fn = vjp(f, (x, (x, x)))
self.assertEqual(out, x * x)
result = vjp_fn(v)
self.assertEqual(result, ((x * v, (x * v, 0.)),))
def test_vjp_pytree_output(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
result, = vjp_fn((v1, (v2, v3)))
self.assertEqual(result, v1 + v2 + v3)
def test_vjp_outputs_can_any_pytree(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): Expected f to be a function that has non-empty output"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): expected f\(\*primals\) to return only tensors"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
output, vjp_fn = vjp(lambda x: [x, x.sum()], x)
vjp_out, = vjp_fn([t, t.sum()])
assert isinstance(output, list) and len(output) == 2
assert isinstance(vjp_out, torch.Tensor)
output, vjp_fn = vjp(lambda x: {"x": x, "xsum": x.sum()}, x)
vjp_out, = vjp_fn({"x": t, "xsum": t.sum()})
assert isinstance(output, dict) and len(output) == 2 and "xsum" in output
assert isinstance(vjp_out, torch.Tensor)
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
output, vjp_fn = vjp(composite_output, x)
vjp_out, = vjp_fn([(t.sum(), {"a": t, "out": [t, t.sum()]}), ])
assert isinstance(output, list)
assert isinstance(output[0], tuple) and isinstance(output[0][1], dict)
assert isinstance(vjp_out, torch.Tensor)
def test_vjp_pytree_error(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
with self.assertRaisesRegex(RuntimeError, 'Expected pytree structure'):
result, = vjp_fn(((v1, (v2, v3)),))
def test_vjp_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: [t, t], x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: (t, t + 2, t + 3), x, has_aux=True)
def f(t):
y = t.sin()
return y, t.cos()
out, vjp_fn, aux = vjp(f, x, has_aux=True)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.sin())
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
def test_vjp_aux_pytree(self, device):
def f(x):
y = x.sin()
return y, {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, vjp_fn, aux = vjp(f, x, has_aux=True)
expected_out, expected_aux = f(x)
self.assertEqual(out, expected_out)
self.assertEqual(aux, expected_aux)
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, aux), x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, [x, aux]), x, has_aux=True)
def test_functional_init(self, device):
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x =
|
F
|
F.relu(x)
|
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
|
torch
|
torch.nn.functional.relu
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 = torch.cos(y)
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
self.assertEqual(grads[0], -x.sin())
self.assertEqual(grads[1], -y.sin())
def test_view_inplace_simple(self, device):
def foo(x):
x = x.clone()
x.view([]).sin_()
return x
x = torch.randn([], requires_grad=True, device=device)
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=-3)(x, y)
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=2)(x, y)
with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
grad(torch.mul, argnums=[0])(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be int'):
grad(torch.mul, argnums=('0',))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, 0))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=0)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_out_of_order_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gy, gx = grad(torch.mul, argnums=(1, 0))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_negative_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=-2)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_grad_pytree_inputs(self, device):
x = torch.randn([], device=device)
def f(a, b):
x, y = a
return 1 * x + 2 * y + 3 * b['foo']
args = ((x, x), {'foo': x})
gx, gy = grad(f)(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), = grad(f, argnums=(0,))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
self.assertEqual(gz['foo'], torch.tensor(3., device=device))
def test_grad_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
def f(t):
y = t.sin()
return y.sum(), t.cos()
out, aux = grad(f, has_aux=True)(x)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.cos())
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, aux = grad(f, has_aux=True)(x)
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
return (x['a']**2.0).sum()
inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
grads = grad(f)(inps)
self.assertNotEqual(grads['a'].sum(), 0.0)
self.assertEqual(grads['b'].sum(), 0.0)
def test_unrelated_grad(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
def unrelated(x):
return y
result = grad(unrelated)(x)
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(x):
return y
out, vjp_fn = vjp(unrelated, x)
result = vjp_fn(v)
expected = (torch.zeros_like(x),)
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
w = torch.tensor(3., device=device)
x = torch.tensor(4., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(w, x):
return y, y, x
out, vjp_fn = vjp(unrelated, w, x)
result = vjp_fn((v, v, v))
expected = (torch.zeros_like(x), torch.ones_like(x))
self.assertEqual(result, expected)
@onlyCPU
def test_unrelated_hessian(self, device):
N = 5
M = 3
W = torch.randn(N, M, device=device)
def f(x):
return W @ x
x = torch.randn(M)
result = jacrev(jacrev(f))(x)
expected = torch.zeros(N, M, M, device=device)
self.assertEqual(result, expected)
def test_vjp_pytree_input(self, device):
def f(x):
return x[0] * x[1][0]
x = torch.randn([], device=device)
v = torch.randn([], device=device)
out, vjp_fn = vjp(f, (x, (x, x)))
self.assertEqual(out, x * x)
result = vjp_fn(v)
self.assertEqual(result, ((x * v, (x * v, 0.)),))
def test_vjp_pytree_output(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
result, = vjp_fn((v1, (v2, v3)))
self.assertEqual(result, v1 + v2 + v3)
def test_vjp_outputs_can_any_pytree(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): Expected f to be a function that has non-empty output"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): expected f\(\*primals\) to return only tensors"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
output, vjp_fn = vjp(lambda x: [x, x.sum()], x)
vjp_out, = vjp_fn([t, t.sum()])
assert isinstance(output, list) and len(output) == 2
assert isinstance(vjp_out, torch.Tensor)
output, vjp_fn = vjp(lambda x: {"x": x, "xsum": x.sum()}, x)
vjp_out, = vjp_fn({"x": t, "xsum": t.sum()})
assert isinstance(output, dict) and len(output) == 2 and "xsum" in output
assert isinstance(vjp_out, torch.Tensor)
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
output, vjp_fn = vjp(composite_output, x)
vjp_out, = vjp_fn([(t.sum(), {"a": t, "out": [t, t.sum()]}), ])
assert isinstance(output, list)
assert isinstance(output[0], tuple) and isinstance(output[0][1], dict)
assert isinstance(vjp_out, torch.Tensor)
def test_vjp_pytree_error(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
with self.assertRaisesRegex(RuntimeError, 'Expected pytree structure'):
result, = vjp_fn(((v1, (v2, v3)),))
def test_vjp_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: [t, t], x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: (t, t + 2, t + 3), x, has_aux=True)
def f(t):
y = t.sin()
return y, t.cos()
out, vjp_fn, aux = vjp(f, x, has_aux=True)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.sin())
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
def test_vjp_aux_pytree(self, device):
def f(x):
y = x.sin()
return y, {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, vjp_fn, aux = vjp(f, x, has_aux=True)
expected_out, expected_aux = f(x)
self.assertEqual(out, expected_out)
self.assertEqual(aux, expected_aux)
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, aux), x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, [x, aux]), x, has_aux=True)
def test_functional_init(self, device):
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x =
|
F
|
F.log_softmax(x, -1)
|
return x
B = 10
weights, fn, _ = functional_init(MLPClassifier, (B,), device=device)(32, 2)
|
torch
|
torch.nn.functional.log_softmax
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 = torch.cos(y)
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
self.assertEqual(grads[0], -x.sin())
self.assertEqual(grads[1], -y.sin())
def test_view_inplace_simple(self, device):
def foo(x):
x = x.clone()
x.view([]).sin_()
return x
x = torch.randn([], requires_grad=True, device=device)
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=-3)(x, y)
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=2)(x, y)
with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
grad(torch.mul, argnums=[0])(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be int'):
grad(torch.mul, argnums=('0',))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, 0))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=0)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_out_of_order_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gy, gx = grad(torch.mul, argnums=(1, 0))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_negative_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=-2)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_grad_pytree_inputs(self, device):
x = torch.randn([], device=device)
def f(a, b):
x, y = a
return 1 * x + 2 * y + 3 * b['foo']
args = ((x, x), {'foo': x})
gx, gy = grad(f)(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), = grad(f, argnums=(0,))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
self.assertEqual(gz['foo'], torch.tensor(3., device=device))
def test_grad_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
def f(t):
y = t.sin()
return y.sum(), t.cos()
out, aux = grad(f, has_aux=True)(x)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.cos())
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, aux = grad(f, has_aux=True)(x)
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
return (x['a']**2.0).sum()
inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
grads = grad(f)(inps)
self.assertNotEqual(grads['a'].sum(), 0.0)
self.assertEqual(grads['b'].sum(), 0.0)
def test_unrelated_grad(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
def unrelated(x):
return y
result = grad(unrelated)(x)
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(x):
return y
out, vjp_fn = vjp(unrelated, x)
result = vjp_fn(v)
expected = (torch.zeros_like(x),)
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
w = torch.tensor(3., device=device)
x = torch.tensor(4., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(w, x):
return y, y, x
out, vjp_fn = vjp(unrelated, w, x)
result = vjp_fn((v, v, v))
expected = (torch.zeros_like(x), torch.ones_like(x))
self.assertEqual(result, expected)
@onlyCPU
def test_unrelated_hessian(self, device):
N = 5
M = 3
W = torch.randn(N, M, device=device)
def f(x):
return W @ x
x = torch.randn(M)
result = jacrev(jacrev(f))(x)
expected = torch.zeros(N, M, M, device=device)
self.assertEqual(result, expected)
def test_vjp_pytree_input(self, device):
def f(x):
return x[0] * x[1][0]
x = torch.randn([], device=device)
v = torch.randn([], device=device)
out, vjp_fn = vjp(f, (x, (x, x)))
self.assertEqual(out, x * x)
result = vjp_fn(v)
self.assertEqual(result, ((x * v, (x * v, 0.)),))
def test_vjp_pytree_output(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
result, = vjp_fn((v1, (v2, v3)))
self.assertEqual(result, v1 + v2 + v3)
def test_vjp_outputs_can_any_pytree(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): Expected f to be a function that has non-empty output"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): expected f\(\*primals\) to return only tensors"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
output, vjp_fn = vjp(lambda x: [x, x.sum()], x)
vjp_out, = vjp_fn([t, t.sum()])
assert isinstance(output, list) and len(output) == 2
assert isinstance(vjp_out, torch.Tensor)
output, vjp_fn = vjp(lambda x: {"x": x, "xsum": x.sum()}, x)
vjp_out, = vjp_fn({"x": t, "xsum": t.sum()})
assert isinstance(output, dict) and len(output) == 2 and "xsum" in output
assert isinstance(vjp_out, torch.Tensor)
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
output, vjp_fn = vjp(composite_output, x)
vjp_out, = vjp_fn([(t.sum(), {"a": t, "out": [t, t.sum()]}), ])
assert isinstance(output, list)
assert isinstance(output[0], tuple) and isinstance(output[0][1], dict)
assert isinstance(vjp_out, torch.Tensor)
def test_vjp_pytree_error(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
with self.assertRaisesRegex(RuntimeError, 'Expected pytree structure'):
result, = vjp_fn(((v1, (v2, v3)),))
def test_vjp_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: [t, t], x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: (t, t + 2, t + 3), x, has_aux=True)
def f(t):
y = t.sin()
return y, t.cos()
out, vjp_fn, aux = vjp(f, x, has_aux=True)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.sin())
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
def test_vjp_aux_pytree(self, device):
def f(x):
y = x.sin()
return y, {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, vjp_fn, aux = vjp(f, x, has_aux=True)
expected_out, expected_aux = f(x)
self.assertEqual(out, expected_out)
self.assertEqual(aux, expected_aux)
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, aux), x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, [x, aux]), x, has_aux=True)
def test_functional_init(self, device):
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
B = 10
weights, fn, _ = functional_init(MLPClassifier, (B,), device=device)(32, 2)
inputs = torch.randn(B, 7, 2, device=device)
vmap(fn)(weights, (inputs,))
def test_functional_init_with_buffers(self, device):
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.bn =
|
nn
|
nn.BatchNorm1d(self.hidden_dim, affine=True)
|
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
|
torch
|
torch.nn.BatchNorm1d
|
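Illustrative sketch, not part of the record above: nn.BatchNorm1d carries non-learnable buffers (running statistics) in addition to its affine parameters, which is why the surrounding test goes through functional_init_with_buffers.
import torch
import torch.nn as nn

bn = nn.BatchNorm1d(32, affine=True)           # 32 features, learnable weight/bias
y = bn(torch.randn(10, 32))                    # (N, C) input updates running stats
print(sorted(name for name, _ in bn.named_buffers()))
# ['num_batches_tracked', 'running_mean', 'running_var']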
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestVmapOfGrad(TestCase):
def test_per_sample_grads_inplace_view(self, device):
def compute_loss(weight, x, t):
x = x.mm(weight)
y = x.squeeze_(0)
return (y - t).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 1, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected =
|
torch
|
torch.stack(expected)
|
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_new_zeros_materializes_tensor(self, device):
|
torch
|
torch.stack
|
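Illustrative sketch, not part of the record above: torch.stack joins equally shaped tensors along a new leading dimension, which is how the loop of per-sample gradients above becomes one batched tensor.
import torch

grads = [torch.randn(16, 2) for _ in range(64)]  # one gradient per sample
stacked = torch.stack(grads)                     # shape (64, 16, 2)
assert stacked.shape == (64, 16, 2)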
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestVmapOfGrad(TestCase):
def test_per_sample_grads_inplace_view(self, device):
def compute_loss(weight, x, t):
x = x.mm(weight)
y = x.squeeze_(0)
return (y - t).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 1, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_new_zeros_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_zeros((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_new_empty_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_empty((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_per_sample_grads_simple(self, device):
def compute_loss(weight, x, t):
y = x @ weight
return ((y - t) ** 2).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_per_sample_grads_embeddingnet(self, device):
class SampleNet(nn.Module):
def __init__(self, vocab_size: int):
super().__init__()
self.emb =
|
nn
|
nn.Embedding(vocab_size, 16)
|
self.fc1 = nn.Linear(16, 16)
self.fc2 = nn.Linear(16, 2)
def forward(self, x):
|
torch
|
torch.nn.Embedding
|
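Illustrative sketch, not part of the record above: nn.Embedding maps integer token ids to dense vectors; the sizes mirror the SampleNet constructor in the prompt.
import torch
import torch.nn as nn

emb = nn.Embedding(1000, 16)                     # vocab_size=1000, embedding_dim=16
tokens = torch.randint(0, 1000, (64, 5))         # batch of 64 sentences, 5 words each
vectors = emb(tokens)
assert vectors.shape == (64, 5, 16)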
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestVmapOfGrad(TestCase):
def test_per_sample_grads_inplace_view(self, device):
def compute_loss(weight, x, t):
x = x.mm(weight)
y = x.squeeze_(0)
return (y - t).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 1, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_new_zeros_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_zeros((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_new_empty_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_empty((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_per_sample_grads_simple(self, device):
def compute_loss(weight, x, t):
y = x @ weight
return ((y - t) ** 2).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_per_sample_grads_embeddingnet(self, device):
class SampleNet(nn.Module):
def __init__(self, vocab_size: int):
super().__init__()
self.emb = nn.Embedding(vocab_size, 16)
self.fc1 = nn.Linear(16, 16)
self.fc2 = nn.Linear(16, 2)
def forward(self, x):
x = self.emb(x)
x =
|
torch
|
torch.transpose(x, -1, -2)
|
x = torch.mean(x, -1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
|
torch
|
torch.transpose
|
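Illustrative sketch, not part of the record above: torch.transpose swaps two dimensions; SampleNet.forward uses it to move the embedding dimension in front of the word dimension.
import torch

x = torch.randn(64, 5, 16)                       # (batch, words, embedding)
y = torch.transpose(x, -1, -2)                   # (batch, embedding, words)
assert y.shape == (64, 16, 5)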
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestVmapOfGrad(TestCase):
def test_per_sample_grads_inplace_view(self, device):
def compute_loss(weight, x, t):
x = x.mm(weight)
y = x.squeeze_(0)
return (y - t).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 1, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_new_zeros_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_zeros((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_new_empty_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_empty((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_per_sample_grads_simple(self, device):
def compute_loss(weight, x, t):
y = x @ weight
return ((y - t) ** 2).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_per_sample_grads_embeddingnet(self, device):
class SampleNet(nn.Module):
def __init__(self, vocab_size: int):
super().__init__()
self.emb = nn.Embedding(vocab_size, 16)
self.fc1 = nn.Linear(16, 16)
self.fc2 = nn.Linear(16, 2)
def forward(self, x):
x = self.emb(x)
x = torch.transpose(x, -1, -2)
x =
|
torch
|
torch.mean(x, -1)
|
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
|
torch
|
torch.mean
|
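Illustrative sketch, not part of the record above: torch.mean with a dim argument reduces that dimension, here pooling the word axis into a single sentence vector.
import torch

x = torch.randn(64, 16, 5)                       # (batch, embedding, words)
pooled = torch.mean(x, -1)                       # (batch, embedding)
assert pooled.shape == (64, 16)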
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestVmapOfGrad(TestCase):
def test_per_sample_grads_inplace_view(self, device):
def compute_loss(weight, x, t):
x = x.mm(weight)
y = x.squeeze_(0)
return (y - t).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 1, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_new_zeros_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_zeros((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_new_empty_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_empty((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_per_sample_grads_simple(self, device):
def compute_loss(weight, x, t):
y = x @ weight
return ((y - t) ** 2).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_per_sample_grads_embeddingnet(self, device):
class SampleNet(nn.Module):
def __init__(self, vocab_size: int):
super().__init__()
self.emb = nn.Embedding(vocab_size, 16)
self.fc1 = nn.Linear(16, 16)
self.fc2 = nn.Linear(16, 2)
def forward(self, x):
x = self.emb(x)
x = torch.transpose(x, -1, -2)
x = torch.mean(x, -1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
def name(self):
return "SampleNet"
vocab_size = 1000
batch_shape = [64]
words_per_sentence = 5
data = torch.randint(0, vocab_size, (*batch_shape, words_per_sentence), device=device)
targets = torch.randint(0, 1, (*batch_shape,), device=device)
net = SampleNet(vocab_size).to(device=device)
criterion =
|
nn
|
nn.CrossEntropyLoss()
|
net_func, weights = make_functional(net)
def compute_loss(weights, data, target):
|
torch
|
torch.nn.CrossEntropyLoss
|
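Illustrative sketch, not part of the record above: nn.CrossEntropyLoss consumes raw logits of shape (N, C) and integer class targets of shape (N,), matching compute_loss in the prompt.
import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
logits = torch.randn(64, 2)                      # raw scores, no softmax applied
targets = torch.randint(0, 2, (64,))
loss = criterion(logits, targets)                # scalar tensor
assert loss.dim() == 0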
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestVmapOfGrad(TestCase):
def test_per_sample_grads_inplace_view(self, device):
def compute_loss(weight, x, t):
x = x.mm(weight)
y = x.squeeze_(0)
return (y - t).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 1, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_new_zeros_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_zeros((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_new_empty_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_empty((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_per_sample_grads_simple(self, device):
def compute_loss(weight, x, t):
y = x @ weight
return ((y - t) ** 2).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_per_sample_grads_embeddingnet(self, device):
class SampleNet(nn.Module):
def __init__(self, vocab_size: int):
super().__init__()
self.emb = nn.Embedding(vocab_size, 16)
self.fc1 = nn.Linear(16, 16)
self.fc2 = nn.Linear(16, 2)
def forward(self, x):
x = self.emb(x)
x = torch.transpose(x, -1, -2)
x = torch.mean(x, -1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
def name(self):
return "SampleNet"
vocab_size = 1000
batch_shape = [64]
words_per_sentence = 5
data = torch.randint(0, vocab_size, (*batch_shape, words_per_sentence), device=device)
targets = torch.randint(0, 1, (*batch_shape,), device=device)
net = SampleNet(vocab_size).to(device=device)
criterion = nn.CrossEntropyLoss()
net_func, weights = make_functional(net)
def compute_loss(weights, data, target):
output = net_func(weights, data)
result = criterion(output, target)
return result
expected = [grad(compute_loss)(weights, data[i], targets[i]) for i in range(64)]
expected = zip(*expected)
expected = tuple(torch.stack(shards) for shards in expected)
result = vmap(partial(grad(compute_loss), weights))(data, targets)
for r, e in zip(result, expected):
self.assertEqual(r, e, atol=0, rtol=1e-4)
def test_log_softmax(self, device):
x = torch.randn(3, 5, device=device)
v = torch.randn(5, device=device)
def foo(x, v):
_, vjp_fn = vjp(partial(torch.log_softmax, dim=-1), x)
return vjp_fn(v)[0]
result = vmap(foo, (0, None))(x, v)
v = v.expand_as(x)
x.requires_grad_()
output =
|
torch
|
torch.log_softmax(x, dim=-1)
|
output.backward(v)
self.assertEqual(result, x.grad)
|
torch
|
torch.log_softmax
|
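Illustrative sketch, not part of the record above: torch.log_softmax is the top-level spelling of the same operation reached earlier through torch.nn.functional; both take a dim argument.
import torch
import torch.nn.functional as F

x = torch.randn(3, 5)
assert torch.allclose(torch.log_softmax(x, dim=-1), F.log_softmax(x, dim=-1))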
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
import copy
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
import unittest
import warnings
import math
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
from functools import partial
from functorch.experimental import replace_all_batch_norm_modules_
import functorch
from functorch import (
grad, vjp, vmap, jacrev, jacfwd, grad_and_value, hessian,
jvp, make_functional, make_functional_with_buffers,
combine_state_for_ensemble,
)
from functorch._src.make_functional import (
functional_init, functional_init_with_buffers,
)
from functorch._src.eager_transforms import _argnums_partial
from functorch._src.custom_function import custom_vjp
import numpy as np
USE_TORCHVISION = False
try:
import torchvision # noqa: F401
USE_TORCHVISION = True
except ImportError:
warnings.warn("Couldn't import torchvision. Some of our tests use it, try "
"to install it with commands from pytorch.org, post-fixed with "
"`--no-deps` to avoid overwriting the pytorch installation",
UserWarning)
class TestArgnumsPartial(TestCase):
def test_invalid_argnum_type(self):
x = torch.randn(3)
args = (x,)
with self.assertRaisesRegex(RuntimeError, "int or Tuple"):
_argnums_partial(torch.sin, args, 0.0)
with self.assertRaisesRegex(RuntimeError, "int or Tuple"):
_argnums_partial(torch.sin, args, [0])
with self.assertRaisesRegex(RuntimeError, "must be int"):
_argnums_partial(torch.sin, args, (0.0,))
args = (0.1, 1.1, 2.1, 3.1, 4.1)
def f(a, b, c, d, e):
return a
with self.assertRaisesRegex(RuntimeError, "must be int"):
_argnums_partial(torch.sin, args, ((0, 1), 2))
def test_out_of_bounds_argnum_values(self):
x = torch.randn(3)
args = (x,)
with self.assertRaisesRegex(RuntimeError, "positional inputs"):
_argnums_partial(torch.sin, args, 1)
with self.assertRaisesRegex(RuntimeError, "positional inputs"):
_argnums_partial(torch.sin, args, -2)
with self.assertRaisesRegex(RuntimeError, "positional inputs"):
_argnums_partial(torch.sin, args, (-2,))
def test_not_enough_argnums(self):
x = torch.randn(3)
args = (x,)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
_argnums_partial(torch.sin, args, ())
def test_duplicate_argnums(self):
x = torch.randn(3)
args = (x, x)
with self.assertRaisesRegex(RuntimeError, "must be unique"):
_argnums_partial(torch.add, args, (0, 0))
with self.assertRaisesRegex(RuntimeError, "must be unique"):
_argnums_partial(torch.add, args, (0, -2))
def test_flat_args_with_positive_int_argnum(self):
args = (0.1, 1.1, 2.1, 3.1, 4.1)
def f(a, b, c, d, e):
return a
f_new, res = _argnums_partial(f, args, 0)
self.assertEqual(res, (0.1,))
self.assertEqual(f_new(*res), 0.1)
f_new, res = _argnums_partial(f, args, 4)
self.assertEqual(res, (4.1,))
self.assertEqual(f_new(*res), 0.1)
def test_flat_args_with_negative_int_argnum(self):
args = (0.1, 1.1, 2.1, 3.1, 4.1)
def f(a, b, c, d, e):
return a
expected = f(*args)
f_new, res = _argnums_partial(f, args, -1)
self.assertEqual(res, (4.1,))
self.assertEqual(f_new(*res), expected)
f_new, res = _argnums_partial(f, args, -5)
self.assertEqual(res, (0.1,))
self.assertEqual(f_new(*res), expected)
def test_flat_args_with_tuple_argnum(self):
args = (0.1, 1.1, 2.1, 3.1, 4.1)
def f(a, b, c, d, e):
return a
f_new, res = _argnums_partial(f, args, (0, 1, 2, 3, 4))
self.assertEqual(f_new(*res), 0.1)
self.assertEqual(res, args)
f_new, res = _argnums_partial(f, args, (0, -3))
self.assertEqual(f_new(*res), 0.1)
self.assertEqual(res, (0.1, 2.1))
def test_pytree_args(self):
args = ((0.1, 1.1), 2.0, [3.1])
def f(a, b, c):
return a[0] + a[1] + b + c[0]
expected = f(*args)
f_new, res = _argnums_partial(f, args, 0)
self.assertEqual(res, args[0:1])
self.assertEqual(f_new(*res), expected)
f_new, res = _argnums_partial(f, args, (0,))
self.assertEqual(res, args[0:1])
self.assertEqual(f_new(*res), expected)
f_new, res = _argnums_partial(f, args, -1)
self.assertEqual(res, args[-1:])
self.assertEqual(f_new(*res), expected)
f_new, res = _argnums_partial(f, args, (0, -2))
self.assertEqual(res, args[0:2])
self.assertEqual(f_new(*res), expected)
def test_argnums_reorders(self):
args = ((0.1, 1.1, 2.1), 3.1, 4.1)
def f(a, b, c):
return a[0] + a[1] + a[2] + b + c
expected = f(*args)
f_new, res = _argnums_partial(f, args, (1, 0))
self.assertEqual(res, (args[1], args[0]))
self.assertEqual(f_new(*res), expected)
def test_function_with_default_args(self):
args = ((0.1, 1.1, 2.1), 3.1)
def f(a, b, c=4.1):
return a[0] + a[1] + a[2] + b + c
expected = f(*args)
f_new, res = _argnums_partial(f, args, -2)
self.assertEqual(res, args[0:1])
self.assertEqual(f_new(*res), expected)
args = ((0.1, 1.1, 2.1), 3.1, 5.1)
expected = f(*args)
f_new, res = _argnums_partial(f, args, -1)
self.assertEqual(res, args[-1:])
self.assertEqual(f_new(*res), expected)
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 = torch.cos(y)
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
self.assertEqual(grads[0], -x.sin())
self.assertEqual(grads[1], -y.sin())
def test_view_inplace_simple(self, device):
def foo(x):
x = x.clone()
x.view([]).sin_()
return x
x = torch.randn([], requires_grad=True, device=device)
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=-3)(x, y)
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=2)(x, y)
with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
grad(torch.mul, argnums=[0])(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be int'):
grad(torch.mul, argnums=('0',))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, 0))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=0)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_out_of_order_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gy, gx = grad(torch.mul, argnums=(1, 0))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_negative_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=-2)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_grad_pytree_inputs(self, device):
x = torch.randn([], device=device)
def f(a, b):
x, y = a
return 1 * x + 2 * y + 3 * b['foo']
args = ((x, x), {'foo': x})
gx, gy = grad(f)(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), = grad(f, argnums=(0,))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
self.assertEqual(gz['foo'], torch.tensor(3., device=device))
def test_grad_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
def f(t):
y = t.sin()
return y.sum(), t.cos()
out, aux = grad(f, has_aux=True)(x)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.cos())
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, aux = grad(f, has_aux=True)(x)
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
return (x['a']**2.0).sum()
inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
grads = grad(f)(inps)
self.assertNotEqual(grads['a'].sum(), 0.0)
self.assertEqual(grads['b'].sum(), 0.0)
def test_unrelated_grad(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
def unrelated(x):
return y
result = grad(unrelated)(x)
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(x):
return y
out, vjp_fn = vjp(unrelated, x)
result = vjp_fn(v)
expected = (torch.zeros_like(x),)
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
w = torch.tensor(3., device=device)
x = torch.tensor(4., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(w, x):
return y, y, x
out, vjp_fn = vjp(unrelated, w, x)
result = vjp_fn((v, v, v))
expected = (torch.zeros_like(x), torch.ones_like(x))
self.assertEqual(result, expected)
@onlyCPU
def test_unrelated_hessian(self, device):
N = 5
M = 3
W = torch.randn(N, M, device=device)
def f(x):
return W @ x
x = torch.randn(M)
result = jacrev(jacrev(f))(x)
expected = torch.zeros(N, M, M, device=device)
self.assertEqual(result, expected)
def test_vjp_pytree_input(self, device):
def f(x):
return x[0] * x[1][0]
x = torch.randn([], device=device)
v = torch.randn([], device=device)
out, vjp_fn = vjp(f, (x, (x, x)))
self.assertEqual(out, x * x)
result = vjp_fn(v)
self.assertEqual(result, ((x * v, (x * v, 0.)),))
def test_vjp_pytree_output(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
result, = vjp_fn((v1, (v2, v3)))
self.assertEqual(result, v1 + v2 + v3)
def test_vjp_outputs_can_any_pytree(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): Expected f to be a function that has non-empty output"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): expected f\(\*primals\) to return only tensors"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
output, vjp_fn = vjp(lambda x: [x, x.sum()], x)
vjp_out, = vjp_fn([t, t.sum()])
assert isinstance(output, list) and len(output) == 2
assert isinstance(vjp_out, torch.Tensor)
output, vjp_fn = vjp(lambda x: {"x": x, "xsum": x.sum()}, x)
vjp_out, = vjp_fn({"x": t, "xsum": t.sum()})
assert isinstance(output, dict) and len(output) == 2 and "xsum" in output
assert isinstance(vjp_out, torch.Tensor)
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
output, vjp_fn = vjp(composite_output, x)
vjp_out, = vjp_fn([(t.sum(), {"a": t, "out": [t, t.sum()]}), ])
assert isinstance(output, list)
assert isinstance(output[0], tuple) and isinstance(output[0][1], dict)
assert isinstance(vjp_out, torch.Tensor)
def test_vjp_pytree_error(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
with self.assertRaisesRegex(RuntimeError, 'Expected pytree structure'):
result, = vjp_fn(((v1, (v2, v3)),))
def test_vjp_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: [t, t], x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: (t, t + 2, t + 3), x, has_aux=True)
def f(t):
y = t.sin()
return y, t.cos()
out, vjp_fn, aux = vjp(f, x, has_aux=True)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.sin())
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
def test_vjp_aux_pytree(self, device):
def f(x):
y = x.sin()
return y, {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, vjp_fn, aux = vjp(f, x, has_aux=True)
expected_out, expected_aux = f(x)
self.assertEqual(out, expected_out)
self.assertEqual(aux, expected_aux)
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, aux), x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, [x, aux]), x, has_aux=True)
def test_functional_init(self, device):
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
B = 10
weights, fn, _ = functional_init(MLPClassifier, (B,), device=device)(32, 2)
inputs = torch.randn(B, 7, 2, device=device)
vmap(fn)(weights, (inputs,))
def test_functional_init_with_buffers(self, device):
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.bn = nn.BatchNorm1d(self.hidden_dim, affine=True)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.bn(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
B = 10
weights, buffers, fn, _, _ = \
functional_init_with_buffers(MLPClassifier, [B], device=device)(32, 2)
inputs = torch.randn(B, 7, 2, device=device)
vmap(fn)(weights, buffers, (inputs,))
def test_advanced_indexing(self, device):
def f(value):
log_prob = torch.ones((), device=device)
val = (torch.zeros(()) > 0)
log_prob[val] = 0
return value
result = grad(f)(torch.randn((), device=device))
self.assertEqual(result, torch.ones_like(result))
def f2(value):
value = value.clone()
value[value > 0] = 0
return value.sum()
x = torch.randn(100, device=device)
result = grad(f2)(x)
self.assertEqual(result, (x <= 0).type_as(x))
def test_tensor_ctor_inside_grad(self, device):
def foo(x):
return x * torch.tensor(2., device=device)
x = torch.tensor(3.14, device=device)
functorch.grad(foo)(x)
@parametrize("op_list_data", [
subtest(([vmap, ], [(4, 2), (64, 3, 32, 32)]), name='vmap'),
subtest(([vmap, vmap], [(4, 3, 2), (64, 3, 32, 32)]), name='vmap_vmap'),
subtest(([grad, ], [(0, ), [], (4, 2), (64, 3, 32, 32)]), name='grad'),
subtest(([grad, grad], [[], ]), name='grad_grad'),
subtest(([vmap, grad], [(4, 2)]), name='vmap_grad'),
])
def test_tensor_print(self, device, op_list_data):
op_list, shapes = op_list_data
for dt in get_all_fp_dtypes():
data = [torch.randn(s, dtype=dt, device=device) for s in shapes]
for x in data:
buf = None
def foo(t):
nonlocal buf
buf = repr(t)
return t.mean()
fn = foo
bdim = 0
for op in reversed(op_list):
if op == vmap:
fn = op(fn, in_dims=bdim)
bdim += 1
else:
fn = op(fn)
expected = f"{repr(x)}"
level = 1
for op in op_list:
level += 1
if op == grad:
expected = f"GradTrackingTensor(lvl={level}, value={expected})"
elif op == vmap:
bdim -= 1
expected = f"BatchedTensor(lvl={level}, bdim={bdim}, value={expected})"
fn(x)
buf = buf.replace("\n", "").replace(" ", "")
expected = expected.replace("\n", "").replace(" ", "")
self.assertEqual(expected, buf)
def test_no_grad_outside(self, device):
x = torch.randn([], device=device, requires_grad=True)
with torch.no_grad():
y = grad(torch.sin)(x)
self.assertEqual(y, x.cos())
self.assertFalse(y.requires_grad)
def test_no_grad_inside(self, device):
def f(x):
with torch.no_grad():
shift = x ** 2
return x ** 2 - shift
x = torch.randn([], device=device)
y = grad(f)(x)
self.assertEqual(y, 2 * x)
y = grad(grad(f))(x)
self.assertEqual(y, 2)
x = torch.randn([], device=device, requires_grad=True)
y = grad(f)(x)
z, = torch.autograd.grad(y, x)
self.assertEqual(z, 2)
def test_no_grad_mixed(self, device):
def f(x):
with torch.no_grad():
shift = x ** 2
return x ** 2 - shift
x = torch.randn([], device=device, requires_grad=True)
with torch.no_grad():
y = grad(f)(x)
self.assertEqual(y, 2 * x)
self.assertFalse(y.requires_grad)
def test_no_grad_nested_simple(self, device):
def h(x):
with torch.no_grad():
shift = grad(lambda x: 0.25 * x ** 4)(x)
return x ** 3 - shift
x = torch.tensor(1.5, device=device, requires_grad=True)
y = grad(h)(x)
self.assertEqual(y, 3 * x ** 2)
z, = torch.autograd.grad(y, x)
self.assertEqual(z, 6 * x)
def test_no_grad_nested_complicated(self, device):
def f(x):
with torch.no_grad():
shift = x ** 3
return x ** 3 - shift
def g(x):
r1 = grad(f)(x)
with torch.no_grad():
shift = grad(f)(x)
return r1 - shift
x = torch.randn([], requires_grad=True, device=device)
y = grad(g)(x)
self.assertEqual(y, 6 * x)
z, = torch.autograd.grad(y, x)
self.assertEqual(z, 6)
def test_no_grad_value(self, device):
def h(x):
with torch.no_grad():
gvalue, value = grad_and_value(lambda x: x ** 3)(x)
return x ** 3 - value
x = torch.tensor(1.6, device=device, requires_grad=True)
y = grad(h)(x)
self.assertEqual(y, 3 * x ** 2)
z, = torch.autograd.grad(y, x)
self.assertEqual(z, 6 * x)
def test_no_grad_outside_vjp(self, device):
def h(x):
return x ** 2
x = torch.tensor(2., requires_grad=True, device=device)
with torch.no_grad():
out, vjp_fn = vjp(h, x)
y, = vjp_fn(torch.tensor(1., device=device))
self.assertEqual(y, 2 * x)
self.assertFalse(y.requires_grad)
self.assertFalse(out.requires_grad)
def test_no_grad_outside_vjp_fn(self, device):
def h(x):
return x ** 2
x = torch.tensor(3.14, requires_grad=True, device=device)
out, vjp_fn = vjp(h, x)
with torch.no_grad():
y, = vjp_fn(torch.tensor(1., device=device))
self.assertEqual(y, 2 * x)
self.assertFalse(y.requires_grad)
self.assertTrue(out.requires_grad)
z, = torch.autograd.grad(out, x)
self.assertEqual(z, 2 * x)
def test_no_grad_outside_vjp_only(self, device):
def h(x):
return x ** 2
x = torch.tensor(3.14, requires_grad=True, device=device)
with torch.no_grad():
out, vjp_fn = vjp(h, x)
y, = vjp_fn(torch.tensor(1., device=device))
self.assertEqual(y, 2 * x)
self.assertFalse(out.requires_grad)
self.assertTrue(y.requires_grad)
z, = torch.autograd.grad(y, x)
self.assertEqual(z, 2)
class TestVmapOfGrad(TestCase):
def test_per_sample_grads_inplace_view(self, device):
def compute_loss(weight, x, t):
x = x.mm(weight)
y = x.squeeze_(0)
return (y - t).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 1, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_new_zeros_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_zeros((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_new_empty_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_empty((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_per_sample_grads_simple(self, device):
def compute_loss(weight, x, t):
y = x @ weight
return ((y - t) ** 2).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_per_sample_grads_embeddingnet(self, device):
class SampleNet(nn.Module):
def __init__(self, vocab_size: int):
super().__init__()
self.emb = nn.Embedding(vocab_size, 16)
self.fc1 = nn.Linear(16, 16)
self.fc2 = nn.Linear(16, 2)
def forward(self, x):
x = self.emb(x)
x = torch.transpose(x, -1, -2)
x = torch.mean(x, -1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
def name(self):
return "SampleNet"
vocab_size = 1000
batch_shape = [64]
words_per_sentence = 5
data = torch.randint(0, vocab_size, (*batch_shape, words_per_sentence), device=device)
targets = torch.randint(0, 1, (*batch_shape,), device=device)
net = SampleNet(vocab_size).to(device=device)
criterion = nn.CrossEntropyLoss()
net_func, weights = make_functional(net)
def compute_loss(weights, data, target):
output = net_func(weights, data)
result = criterion(output, target)
return result
expected = [grad(compute_loss)(weights, data[i], targets[i]) for i in range(64)]
expected = zip(*expected)
expected = tuple(torch.stack(shards) for shards in expected)
result = vmap(partial(grad(compute_loss), weights))(data, targets)
for r, e in zip(result, expected):
self.assertEqual(r, e, atol=0, rtol=1e-4)
def test_log_softmax(self, device):
x = torch.randn(3, 5, device=device)
v = torch.randn(5, device=device)
def foo(x, v):
_, vjp_fn = vjp(partial(torch.log_softmax, dim=-1), x)
return vjp_fn(v)[0]
result = vmap(foo, (0, None))(x, v)
v = v.expand_as(x)
x.requires_grad_()
output = torch.log_softmax(x, dim=-1)
output.backward(v)
self.assertEqual(result, x.grad)
jacrev_and_jacfwd =
|
parametrize
|
parametrize("jacapi", [subtest(jacrev, name='jacrev'), subtest(jacfwd, name='jacfwd')])
|
FIXME_jacrev_only = parametrize("jacapi", [subtest(jacrev, name='jacrev')])
|
torch
|
torch.testing._internal.common_utils.parametrize
|
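Illustrative sketch, not part of the record above and using a made-up test class: parametrize/subtest from torch.testing._internal.common_utils attach named parameter cases to a test method; the cases only become runnable tests once the class is instantiated by the harness used in the prompt (e.g. instantiate_device_type_tests).
import torch
from torch.testing._internal.common_utils import TestCase, parametrize, subtest

class TestTrig(TestCase):                        # hypothetical test class
    @parametrize("op", [subtest(torch.sin, name="sin"), subtest(torch.cos, name="cos")])
    def test_shape_preserved(self, op):
        self.assertEqual(op(torch.zeros(3)).shape, torch.Size([3]))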
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestJac(TestCase):
@jacrev_and_jacfwd
def test_simple(self, device, jacapi):
x = torch.randn(3, device=device)
y = jacapi(torch.sin)(x)
expected =
|
torch
|
torch.diagflat(x.cos())
|
assert torch.allclose(y, expected)
@jacrev_and_jacfwd
def test_simple_not_flat(self, device, jacapi):
|
torch
|
torch.diagflat
|
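Illustrative sketch, not part of the record above: torch.diagflat places the (flattened) input on the diagonal of a 2-D matrix, which is exactly the expected Jacobian of an elementwise function such as torch.sin.
import torch

x = torch.randn(3)
J = torch.diagflat(x.cos())                      # (3, 3), zeros off the diagonal
assert J.shape == (3, 3)
assert torch.equal(J.diagonal(), x.cos())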
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestJac(TestCase):
@jacrev_and_jacfwd
def test_simple(self, device, jacapi):
x = torch.randn(3, device=device)
y = jacapi(torch.sin)(x)
expected = torch.diagflat(x.cos())
assert torch.allclose(y, expected)
@jacrev_and_jacfwd
def test_simple_not_flat(self, device, jacapi):
x = torch.randn(2, 3, device=device)
y = jacapi(torch.sin)(x)
expected = torch.diagflat(x.view(-1).cos())
expected = expected.view(2, 3, 2, 3)
assert torch.allclose(y, expected)
@FIXME_jacrev_only
def test_diff_numel(self, device, jacapi):
x = torch.randn(2, 4, device=device)
def f(x):
return x[0, 1:].unsqueeze(-1)
y = jacapi(f)(x)
self.assertEqual(y.shape, (3, 1, 2, 4))
expected = x.new_zeros(3, 1, 2, 4)
expected[0, 0, 0, 1] = 1
expected[1, 0, 0, 2] = 1
expected[2, 0, 0, 3] = 1
self.assertEqual(y, expected)
@FIXME_jacrev_only
def test_vmap_on_jac_simple(self, device, jacapi):
x = torch.randn(2, 3, device=device)
y = vmap(jacapi(torch.sin))(x)
expected = torch.stack([torch.diagflat(x[i].cos()) for i in range(2)])
assert torch.allclose(y, expected)
@FIXME_jacrev_only
def test_nested_jac_simple(self, device, jacapi):
def foo(x):
return x.sin().sum()
x = torch.randn(3, device=device)
y = jacapi(jacapi(foo))(x)
expected = torch.diagflat(-x.sin())
assert torch.allclose(y, expected)
@jacrev_and_jacfwd
def test_multiple_args(self, device, jacapi):
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(torch.multiply, argnums=1)(x, y)
expected = torch.diagflat(x)
assert torch.allclose(z, expected)
@jacrev_and_jacfwd
def test_multiple_outputs_multiple_argnums(self, device, jacapi):
def f(x, y):
return 2 * x + 3 * y, 4 * x + 5 * y
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(f, argnums=(0, 1))(x, y)
expected_out0_x = torch.diagflat(torch.full_like(x, 2))
expected_out0_y = torch.diagflat(torch.full_like(y, 3))
expected_out1_x = torch.diagflat(torch.full_like(x, 4))
expected_out1_y = torch.diagflat(torch.full_like(y, 5))
self.assertEqual(len(z), 2)
self.assertTrue(isinstance(z, tuple))
self.assertEqual(len(z[0]), 2)
self.assertTrue(isinstance(z[0], tuple))
self.assertEqual(z[0][0], expected_out0_x)
self.assertEqual(z[0][1], expected_out0_y)
self.assertEqual(z[1][0], expected_out1_x)
self.assertEqual(z[1][1], expected_out1_y)
@jacrev_and_jacfwd
def test_multiple_outputs_single_argnums(self, device, jacapi):
def f(x, y):
return 2 * x + 3 * y, 4 * x + 5 * y
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected_out0_x = torch.diagflat(torch.full_like(x, 2))
expected_out1_x = torch.diagflat(torch.full_like(x, 4))
z = jacapi(f, argnums=0)(x, y)
self.assertEqual(len(z), 2)
self.assertTrue(isinstance(z, tuple))
self.assertEqual(z, (expected_out0_x, expected_out1_x))
z = jacapi(f, argnums=(0,))(x, y)
self.assertEqual(len(z), 2)
self.assertTrue(isinstance(z, tuple))
self.assertTrue(isinstance(z[0], tuple))
self.assertEqual(z, ((expected_out0_x,), (expected_out1_x,)))
@FIXME_jacrev_only
def test_multiple_outputs_pytree(self, device, jacapi):
def f(x, y):
return {'left': 2 * x + 3 * y, 'right': 4 * x + 5 * y}
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(f, argnums=(0, 1))(x, y)
expected_left_x = torch.diagflat(torch.full_like(x, 2))
expected_left_y = torch.diagflat(torch.full_like(y, 3))
expected_right_x = torch.diagflat(torch.full_like(x, 4))
expected_right_y = torch.diagflat(torch.full_like(y, 5))
expected = {
'left': (expected_left_x, expected_left_y),
'right': (expected_right_x, expected_right_y),
}
self.assertTrue(isinstance(z, dict))
self.assertTrue(isinstance(z['left'], tuple))
self.assertTrue(isinstance(z['right'], tuple))
self.assertEqual(z, expected)
@jacrev_and_jacfwd
def test_multiple_inputs_pytree(self, device, jacapi):
def f(a, b, c):
a0, a1 = a
return a0 + a1 * 2 + b * 3 + c * 4
x = torch.randn([], device=device)
args = ((x, x), x, x)
result = jacapi(f, argnums=(0, 1, 2))(*args)
expected = (
(torch.tensor(1., device=device), torch.tensor(2., device=device)),
torch.tensor(3., device=device),
torch.tensor(4., device=device),
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0,))(*args)
expected = ((torch.tensor(1., device=device), torch.tensor(2., device=device)),)
self.assertEqual(result, expected)
result = jacapi(f)(*args)
expected = (torch.tensor(1., device=device), torch.tensor(2., device=device))
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_dimensionality(self, device, jacapi):
def f(x):
return x
x = torch.randn([], device=device)
result = jacapi(f)(x)
self.assertEqual(result.dim(), 0)
self.assertEqual(result, torch.ones_like(x))
x = torch.randn([1], device=device)
result = jacapi(f)(x)
self.assertEqual(result.dim(), 2)
self.assertEqual(result, x.new_ones(1, 1))
@FIXME_jacrev_only
def test_aux_tensor(self, device, jacapi):
def f(x):
y = x.clone()
return y, y.cos()
x = torch.randn(3, device=device)
result, aux = jacapi(f, has_aux=True)(x)
self.assertEqual(result, torch.eye(3, 3, device=device))
self.assertEqual(aux, x.cos())
@jacrev_and_jacfwd
def test_aux_pytree(self, device, jacapi):
def f(x):
y = x.clone()
return y, {'a': y.cos(), 'b': [y.tan()]}
x = torch.randn(3, device=device)
result, aux = jacapi(f, has_aux=True)(x)
self.assertEqual(result, torch.eye(3, 3, device=device))
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = jacapi(lambda x: (x, aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = jacapi(lambda x: (x, [x, aux]), has_aux=True)(x)
@jacrev_and_jacfwd
def test_outputs_can_any_pytree(self, device, jacapi):
x = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError, r"(vjp|jvp).+: Expected f to be a function that has non-empty output"
):
jacapi(lambda _: output)(x)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"(vjp|jvp).+: expected f\(\*primals\) to return only tensors"
):
jacapi(lambda _: output)(x)
out = jacapi(lambda x: [x, x.sum()])(x)
assert isinstance(out, list) and len(out) == 2
out = jacapi(lambda x: {"x": x, "xsum": x.sum()})(x)
assert isinstance(out, dict) and len(out) == 2 and "xsum" in out
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
out = jacapi(composite_output)(x)
assert isinstance(out, list)
assert isinstance(out[0], tuple) and isinstance(out[0][1], dict)
@jacrev_and_jacfwd
def test_multiple_inputs_outputs_pytree(self, device, jacapi):
def f(a, b, c):
a0, a1 = a
return a0 + a1 * 2, {'foo': b * 3 + c * 4}
x = torch.randn([], device=device)
zero = torch.zeros([], device=device)
args = ((x, x), x, x)
result = jacapi(f)(*args)
expected = (
(torch.tensor(1., device=device), torch.tensor(2., device=device)),
{'foo': (zero, zero)},
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0,))(*args)
expected = (
((torch.tensor(1., device=device), torch.tensor(2., device=device)),),
{'foo': ((zero, zero),)},
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0, 1))(*args)
expected = (
((torch.tensor(1., device=device), torch.tensor(2., device=device)), zero),
{'foo': ((zero, zero), torch.tensor(3., device=device))},
)
self.assertEqual(result, expected)
@FIXME_jacrev_only
def test_multiple_inputs_outputs_pytree_multidim(self, device, jacapi):
def f(dct):
a = dct['a']
b = dct['b']
return {'c': a.sin(), 'd': b.cos()}
x = torch.randn(3, device=device)
args = ({'a': x, 'b': x},)
result = jacapi(f)(*args)
expected = {
'c': {'a': x.cos().diagflat(), 'b': x.new_zeros(3, 3)},
'd': {'a': x.new_zeros(3, 3), 'b': -x.sin().diagflat()},
}
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_unrelated_input(self, device, jacapi):
def f(x, y):
return x
x = torch.randn(2, 3, device=device)
y = torch.randn(2, 3, device=device)
result = jacapi(f, argnums=(0, 1))(x, y)
expected0 =
torch | torch.eye(6, 6, device=device).view(2, 3, 2, 3) | expected1 = y.new_zeros(2, 3, 2, 3) ⏎ expected = (expected0, expected1) ⏎ self.assertTrue(isinstance(result, tuple)) ⏎ self.assertEqual(result, expected) | torch | torch.eye.view | torch_direct_api | v_1_10_0
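Illustrative addition, not part of the dataset row above: a minimal sketch of the torch.eye(...).view(...) pattern this row records, i.e. reshaping a flat identity matrix into the Jacobian of an identity map on (2, 3) tensors.
import torch

jac = torch.eye(6, 6).view(2, 3, 2, 3)
# jac[i, j, k, l] == 1 exactly when (i, j) == (k, l), matching d out[i, j] / d in[k, l] for f(x) = x.
assert jac[0, 1, 0, 1] == 1
assert jac[0, 1, 1, 2] == 0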
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestJac(TestCase):
@jacrev_and_jacfwd
def test_simple(self, device, jacapi):
x = torch.randn(3, device=device)
y = jacapi(torch.sin)(x)
expected = torch.diagflat(x.cos())
assert torch.allclose(y, expected)
@jacrev_and_jacfwd
def test_simple_not_flat(self, device, jacapi):
x = torch.randn(2, 3, device=device)
y = jacapi(torch.sin)(x)
expected = torch.diagflat(x.view(-1).cos())
expected = expected.view(2, 3, 2, 3)
assert torch.allclose(y, expected)
@FIXME_jacrev_only
def test_diff_numel(self, device, jacapi):
x = torch.randn(2, 4, device=device)
def f(x):
return x[0, 1:].unsqueeze(-1)
y = jacapi(f)(x)
self.assertEqual(y.shape, (3, 1, 2, 4))
expected = x.new_zeros(3, 1, 2, 4)
expected[0, 0, 0, 1] = 1
expected[1, 0, 0, 2] = 1
expected[2, 0, 0, 3] = 1
self.assertEqual(y, expected)
@FIXME_jacrev_only
def test_vmap_on_jac_simple(self, device, jacapi):
x = torch.randn(2, 3, device=device)
y = vmap(jacapi(torch.sin))(x)
expected = torch.stack([torch.diagflat(x[i].cos()) for i in range(2)])
assert torch.allclose(y, expected)
@FIXME_jacrev_only
def test_nested_jac_simple(self, device, jacapi):
def foo(x):
return x.sin().sum()
x = torch.randn(3, device=device)
y = jacapi(jacapi(foo))(x)
expected = torch.diagflat(-x.sin())
assert torch.allclose(y, expected)
@jacrev_and_jacfwd
def test_multiple_args(self, device, jacapi):
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(torch.multiply, argnums=1)(x, y)
expected = torch.diagflat(x)
assert torch.allclose(z, expected)
@jacrev_and_jacfwd
def test_multiple_outputs_multiple_argnums(self, device, jacapi):
def f(x, y):
return 2 * x + 3 * y, 4 * x + 5 * y
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(f, argnums=(0, 1))(x, y)
expected_out0_x = torch.diagflat(torch.full_like(x, 2))
expected_out0_y = torch.diagflat(torch.full_like(y, 3))
expected_out1_x = torch.diagflat(torch.full_like(x, 4))
expected_out1_y = torch.diagflat(torch.full_like(y, 5))
self.assertEqual(len(z), 2)
self.assertTrue(isinstance(z, tuple))
self.assertEqual(len(z[0]), 2)
self.assertTrue(isinstance(z[0], tuple))
self.assertEqual(z[0][0], expected_out0_x)
self.assertEqual(z[0][1], expected_out0_y)
self.assertEqual(z[1][0], expected_out1_x)
self.assertEqual(z[1][1], expected_out1_y)
@jacrev_and_jacfwd
def test_multiple_outputs_single_argnums(self, device, jacapi):
def f(x, y):
return 2 * x + 3 * y, 4 * x + 5 * y
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected_out0_x = torch.diagflat(torch.full_like(x, 2))
expected_out1_x = torch.diagflat(torch.full_like(x, 4))
z = jacapi(f, argnums=0)(x, y)
self.assertEqual(len(z), 2)
self.assertTrue(isinstance(z, tuple))
self.assertEqual(z, (expected_out0_x, expected_out1_x))
z = jacapi(f, argnums=(0,))(x, y)
self.assertEqual(len(z), 2)
self.assertTrue(isinstance(z, tuple))
self.assertTrue(isinstance(z[0], tuple))
self.assertEqual(z, ((expected_out0_x,), (expected_out1_x,)))
@FIXME_jacrev_only
def test_multiple_outputs_pytree(self, device, jacapi):
def f(x, y):
return {'left': 2 * x + 3 * y, 'right': 4 * x + 5 * y}
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(f, argnums=(0, 1))(x, y)
expected_left_x = torch.diagflat(torch.full_like(x, 2))
expected_left_y = torch.diagflat(torch.full_like(y, 3))
expected_right_x = torch.diagflat(torch.full_like(x, 4))
expected_right_y = torch.diagflat(torch.full_like(y, 5))
expected = {
'left': (expected_left_x, expected_left_y),
'right': (expected_right_x, expected_right_y),
}
self.assertTrue(isinstance(z, dict))
self.assertTrue(isinstance(z['left'], tuple))
self.assertTrue(isinstance(z['right'], tuple))
self.assertEqual(z, expected)
@jacrev_and_jacfwd
def test_multiple_inputs_pytree(self, device, jacapi):
def f(a, b, c):
a0, a1 = a
return a0 + a1 * 2 + b * 3 + c * 4
x = torch.randn([], device=device)
args = ((x, x), x, x)
result = jacapi(f, argnums=(0, 1, 2))(*args)
expected = (
(torch.tensor(1., device=device), torch.tensor(2., device=device)),
torch.tensor(3., device=device),
torch.tensor(4., device=device),
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0,))(*args)
expected = ((torch.tensor(1., device=device), torch.tensor(2., device=device)),)
self.assertEqual(result, expected)
result = jacapi(f)(*args)
expected = (torch.tensor(1., device=device), torch.tensor(2., device=device))
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_dimensionality(self, device, jacapi):
def f(x):
return x
x = torch.randn([], device=device)
result = jacapi(f)(x)
self.assertEqual(result.dim(), 0)
self.assertEqual(result, torch.ones_like(x))
x = torch.randn([1], device=device)
result = jacapi(f)(x)
self.assertEqual(result.dim(), 2)
self.assertEqual(result, x.new_ones(1, 1))
@FIXME_jacrev_only
def test_aux_tensor(self, device, jacapi):
def f(x):
y = x.clone()
return y, y.cos()
x = torch.randn(3, device=device)
result, aux = jacapi(f, has_aux=True)(x)
self.assertEqual(result, torch.eye(3, 3, device=device))
self.assertEqual(aux, x.cos())
@jacrev_and_jacfwd
def test_aux_pytree(self, device, jacapi):
def f(x):
y = x.clone()
return y, {'a': y.cos(), 'b': [y.tan()]}
x = torch.randn(3, device=device)
result, aux = jacapi(f, has_aux=True)(x)
self.assertEqual(result, torch.eye(3, 3, device=device))
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = jacapi(lambda x: (x, aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = jacapi(lambda x: (x, [x, aux]), has_aux=True)(x)
@jacrev_and_jacfwd
def test_outputs_can_any_pytree(self, device, jacapi):
x = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError, r"(vjp|jvp).+: Expected f to be a function that has non-empty output"
):
jacapi(lambda _: output)(x)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"(vjp|jvp).+: expected f\(\*primals\) to return only tensors"
):
jacapi(lambda _: output)(x)
out = jacapi(lambda x: [x, x.sum()])(x)
assert isinstance(out, list) and len(out) == 2
out = jacapi(lambda x: {"x": x, "xsum": x.sum()})(x)
assert isinstance(out, dict) and len(out) == 2 and "xsum" in out
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
out = jacapi(composite_output)(x)
assert isinstance(out, list)
assert isinstance(out[0], tuple) and isinstance(out[0][1], dict)
@jacrev_and_jacfwd
def test_multiple_inputs_outputs_pytree(self, device, jacapi):
def f(a, b, c):
a0, a1 = a
return a0 + a1 * 2, {'foo': b * 3 + c * 4}
x = torch.randn([], device=device)
zero = torch.zeros([], device=device)
args = ((x, x), x, x)
result = jacapi(f)(*args)
expected = (
(torch.tensor(1., device=device), torch.tensor(2., device=device)),
{'foo': (zero, zero)},
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0,))(*args)
expected = (
((torch.tensor(1., device=device), torch.tensor(2., device=device)),),
{'foo': ((zero, zero),)},
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0, 1))(*args)
expected = (
((torch.tensor(1., device=device), torch.tensor(2., device=device)), zero),
{'foo': ((zero, zero), torch.tensor(3., device=device))},
)
self.assertEqual(result, expected)
@FIXME_jacrev_only
def test_multiple_inputs_outputs_pytree_multidim(self, device, jacapi):
def f(dct):
a = dct['a']
b = dct['b']
return {'c': a.sin(), 'd': b.cos()}
x = torch.randn(3, device=device)
args = ({'a': x, 'b': x},)
result = jacapi(f)(*args)
expected = {
'c': {'a': x.cos().diagflat(), 'b': x.new_zeros(3, 3)},
'd': {'a': x.new_zeros(3, 3), 'b': -x.sin().diagflat()},
}
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_unrelated_input(self, device, jacapi):
def f(x, y):
return x
x = torch.randn(2, 3, device=device)
y = torch.randn(2, 3, device=device)
result = jacapi(f, argnums=(0, 1))(x, y)
expected0 = torch.eye(6, 6, device=device).view(2, 3, 2, 3)
expected1 = y.new_zeros(2, 3, 2, 3)
expected = (expected0, expected1)
self.assertTrue(isinstance(result, tuple))
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_unrelated_output(self, device, jacapi):
y = torch.randn(2, 3, device=device)
def f(x):
return y
x = torch.randn(2, 3, device=device)
result = jacapi(f)(x)
expected = x.new_zeros(2, 3, 2, 3)
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_empty_output(self, device, jacapi):
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
def f(x, y):
return ()
with self.assertRaisesRegex(RuntimeError, 'xpected'):
jacapi(f)(x, y)
@jacrev_and_jacfwd
def test_argnums_tuple(self, device, jacapi):
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(torch.multiply, argnums=(0, 1))(x, y)
expected0 = torch.diagflat(y)
expected1 = torch.diagflat(x)
assert len(z) == 2
assert torch.allclose(z[0], expected0)
assert torch.allclose(z[1], expected1)
@jacrev_and_jacfwd
def test_argnums_effect_on_return(self, device, jacapi):
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(torch.multiply, argnums=(0,))(x, y)
expected0 = torch.diagflat(y)
assert isinstance(z, tuple)
assert len(z) == 1
assert torch.allclose(z[0], expected0)
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(torch.multiply, argnums=0)(x, y)
expected0 = torch.diagflat(y)
assert isinstance(z, torch.Tensor)
assert torch.allclose(z, expected0)
@jacrev_and_jacfwd
def test_argnums_defaults_to_zero(self, device, jacapi):
def f(x, y):
return x * 2 + y * 3
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(f)(x, y)
expected = torch.diagflat(torch.full_like(x, 2))
self.assertEqual(z, expected)
@jacrev_and_jacfwd
def test_empty_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
jacapi(torch.sin, argnums=())(x)
@jacrev_and_jacfwd
def test_out_of_bounds_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "only 1 positional inputs"):
jacapi(torch.sin, argnums=2)(x)
@jacrev_and_jacfwd
def test_negative_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "only 1 positional inputs"):
jacapi(torch.sin, argnums=-2)(x)
@jacrev_and_jacfwd
def test_repeated_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "must be unique"):
jacapi(torch.sin, argnums=(0, 0))(x)
@jacrev_and_jacfwd
def test_float_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "must be int or Tuple"):
jacapi(torch.sin, argnums=0.0)(x)
with self.assertRaisesRegex(RuntimeError, "must be int"):
jacapi(torch.multiply, argnums=(1, 0.0))(x, x)
def test_hessian_simple(self, device):
def f(x):
return x.sin()
x = torch.randn(3, device=device)
hessian(f)(x)
def _test_against_reference(self, f, inputs, jacapi):
def foo(inputs):
return f(*inputs)
expected =
torch | torch.autograd.functional.jacobian(f, inputs) | result = jacapi(foo)(inputs) ⏎ self.assertEqual(result, expected) ⏎ @jacrev_and_jacfwd | torch | torch.autograd.functional.jacobian | torch_direct_api | v_1_10_0
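Illustrative addition, not part of the dataset row above: a standalone sketch of torch.autograd.functional.jacobian, the reference implementation the test compares against (assuming PyTorch 1.10+).
import torch

def f(x):
    return x.sin()

x = torch.randn(3)
J = torch.autograd.functional.jacobian(f, x)      # (3, 3) Jacobian of elementwise sin
assert torch.allclose(J, torch.diagflat(x.cos()))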
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestHessian(TestCase):
def _test_against_reference(self, f, inputs):
def foo(inputs):
return f(*inputs)
expected =
torch | torch.autograd.functional.hessian(f, inputs) | result = hessian(foo)(inputs) ⏎ self.assertEqual(result, expected) ⏎ def test_hessian_vectorize_correctness_simple(self, device): | torch | torch.autograd.functional.hessian | torch_direct_api | v_1_10_0
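Illustrative addition, not part of the dataset row above: a standalone sketch of torch.autograd.functional.hessian for a scalar-valued function (assuming PyTorch 1.10+).
import torch

def f(x):
    return (x ** 3).sum()          # scalar output, as hessian requires

x = torch.randn(3)
H = torch.autograd.functional.hessian(f, x)       # (3, 3) Hessian
assert torch.allclose(H, torch.diagflat(6 * x))   # second derivative of x_i**3 is 6 * x_i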
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestMakeFunctional(TestCase):
def test_parameter_tying(self):
class Foo(nn.Module):
def __init__(self):
super().__init__()
self.bias =
nn | nn.Parameter(torch.randn(3)) | self.linear = nn.Linear(3, 3) ⏎ self.linear.bias = self.bias ⏎ self.linear_tied = self.linear | torch | torch.nn.Parameter | torch_direct_api | v_1_10_0
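Illustrative addition, not part of the dataset row above: a minimal sketch of torch.nn.Parameter, showing that wrapping a tensor registers it with the owning module (the parameter-tying test above relies on this registration).
import torch
import torch.nn as nn

class TinyModule(nn.Module):
    def __init__(self):
        super().__init__()
        self.bias = nn.Parameter(torch.randn(3))   # registered as a learnable parameter

    def forward(self, x):
        return x + self.bias

mod = TinyModule()
assert any(p is mod.bias for p in mod.parameters())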
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestMakeFunctional(TestCase):
def test_parameter_tying(self):
class Foo(nn.Module):
def __init__(self):
super().__init__()
self.bias = nn.Parameter(torch.randn(3))
self.linear = nn.Linear(3, 3)
self.linear.bias = self.bias
self.linear_tied = self.linear
mod = Foo()
with self.assertRaisesRegex(RuntimeError, "parameter tying"):
func, params = make_functional(mod)
def test_buffer_tying(self):
class Foo(nn.Module):
def __init__(self):
super().__init__()
self.bias = nn.Parameter(torch.randn(3))
self.linear = nn.Linear(3, 3)
self.register_buffer('buffer', torch.randn(3))
self.register_buffer('buffer_tied', self.buffer)
mod = Foo()
with self.assertRaisesRegex(RuntimeError, "parameter tying"):
func, params, buffers = make_functional_with_buffers(mod)
def test_combine_state_for_ensemble_error(self):
in_features = 2
out_features = 2
models = []
with self.assertRaisesRegex(RuntimeError, "Expected at least one model"):
_ = combine_state_for_ensemble(models)
num_models = 3
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
models[1].eval()
with self.assertRaisesRegex(RuntimeError, "same training/eval mode"):
_ = combine_state_for_ensemble(models)
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
models[1] =
torch | torch.nn.Conv2d(3, 3, (3, 3)) | with self.assertRaisesRegex(RuntimeError, "models to be of the same class"): ⏎ _ = combine_state_for_ensemble(models) ⏎ def test_combine_state_for_ensemble_smoke(self): | torch | torch.nn.Conv2d | torch_direct_api | v_1_10_0
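Illustrative addition, not part of the dataset row above: a minimal sketch of torch.nn.Conv2d with the same constructor arguments the test uses.
import torch

conv = torch.nn.Conv2d(3, 3, (3, 3))   # in_channels=3, out_channels=3, 3x3 kernel
x = torch.randn(1, 3, 8, 8)            # (N, C, H, W)
y = conv(x)
assert y.shape == (1, 3, 6, 6)         # each spatial dim shrinks by kernel_size - 1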
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestExamplesCorrectness(TestCase):
def test_maml_regression(self, device):
class ThreeLayerNet(nn.Module):
def __init__(self):
super(ThreeLayerNet, self).__init__()
self.fc1 = nn.Linear(1, 40)
self.relu1 =
nn | nn.ReLU() | self.fc2 = nn.Linear(40, 40) ⏎ self.relu2 = nn.ReLU() ⏎ self.fc3 = nn.Linear(40, 1) | torch | torch.nn.ReLU | torch_direct_api | v_1_10_0
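Illustrative addition, not part of the dataset row above: a minimal sketch of torch.nn.ReLU as used in the network definition.
import torch
import torch.nn as nn

relu = nn.ReLU()
x = torch.tensor([-1.0, 0.0, 2.0])
assert torch.equal(relu(x), torch.tensor([0.0, 0.0, 2.0]))   # negatives clamped to zero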
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestExamplesCorrectness(TestCase):
def test_maml_regression(self, device):
class ThreeLayerNet(nn.Module):
def __init__(self):
super(ThreeLayerNet, self).__init__()
self.fc1 = nn.Linear(1, 40)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(40, 40)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(40, 1)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc3(x)
return x
def mse_loss(x, y):
return torch.mean((x - y) ** 2)
net, params = make_functional(ThreeLayerNet().to(device))
K = 20
num_tasks = 4
alpha = 0.1
def sample_tasks(outer_batch_size, inner_batch_size):
As = []
phases = []
for _ in range(outer_batch_size):
As.append(np.random.uniform(low=0.1, high=.5))
phases.append(np.random.uniform(low=0., high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
return torch.tensor(xs, dtype=torch.float, device=device), \
torch.tensor(ys, dtype=torch.float, device=device)
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
def get_loss_for_task(use_transform, x1, y1, x2, y2):
def inner_loss(params, x1, y1):
f = net(params, x1)
loss = mse_loss(f, y1)
return loss
if use_transform:
grads = grad(inner_loss)(params, x1, y1)
else:
loss = inner_loss(params, x1, y1)
grads = torch.autograd.grad(loss, params, create_graph=True)
new_params = [(params[i] - alpha*grads[i]) for i in range(len(params))]
v_f = net(new_params, x2)
return mse_loss(v_f, y2)
task = sample_tasks(num_tasks, K)
inner_losses = vmap(partial(get_loss_for_task, True))(task[0], task[1], task[2], task[3])
loss2 = sum(inner_losses)/len(inner_losses)
result_grads = torch.autograd.grad(loss2, params)
inner_losses = [
get_loss_for_task(False, task[0][i], task[1][i], task[2][i], task[3][i])
for i in range(num_tasks)
]
loss2 = sum(inner_losses)/len(inner_losses)
expected_grads = torch.autograd.grad(loss2, params)
self.assertEqual(result_grads, expected_grads)
def test_maml_omniglot(self, device):
dtype = torch.double
inplace_relu = False
n_way = 5
n_inner_iter = 2
num_tasks = 2
net = nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(64, n_way)).to(device).to(dtype)
fnet, params, buffers = make_functional_with_buffers(net)
net = (params, buffers, fnet)
def loss_for_task(net, n_inner_iter, use_transform, x_spt, y_spt, x_qry, y_qry):
params, buffers, fnet = net
querysz = x_qry.size(0)
def compute_loss(new_params, buffers, x, y):
logits = fnet(new_params, buffers, x)
loss =
F | F.cross_entropy(logits, y) | return loss ⏎ new_params = params ⏎ for _ in range(n_inner_iter): | torch | torch.nn.functional.cross_entropy | torch_direct_api | v_1_10_0
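Illustrative addition, not part of the dataset row above: a standalone sketch of torch.nn.functional.cross_entropy on raw logits and integer class labels, as in compute_loss above.
import torch
import torch.nn.functional as F

logits = torch.randn(4, 5)              # (batch, num_classes) unnormalized scores
labels = torch.randint(0, 5, (4,))      # integer class indices
loss = F.cross_entropy(logits, labels)  # log_softmax + negative log-likelihood, mean-reduced
assert loss.dim() == 0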
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestExamplesCorrectness(TestCase):
def test_maml_regression(self, device):
class ThreeLayerNet(nn.Module):
def __init__(self):
super(ThreeLayerNet, self).__init__()
self.fc1 = nn.Linear(1, 40)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(40, 40)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(40, 1)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc3(x)
return x
def mse_loss(x, y):
return torch.mean((x - y) ** 2)
net, params = make_functional(ThreeLayerNet().to(device))
K = 20
num_tasks = 4
alpha = 0.1
def sample_tasks(outer_batch_size, inner_batch_size):
As = []
phases = []
for _ in range(outer_batch_size):
As.append(np.random.uniform(low=0.1, high=.5))
phases.append(np.random.uniform(low=0., high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
return torch.tensor(xs, dtype=torch.float, device=device), \
torch.tensor(ys, dtype=torch.float, device=device)
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
def get_loss_for_task(use_transform, x1, y1, x2, y2):
def inner_loss(params, x1, y1):
f = net(params, x1)
loss = mse_loss(f, y1)
return loss
if use_transform:
grads = grad(inner_loss)(params, x1, y1)
else:
loss = inner_loss(params, x1, y1)
grads = torch.autograd.grad(loss, params, create_graph=True)
new_params = [(params[i] - alpha*grads[i]) for i in range(len(params))]
v_f = net(new_params, x2)
return mse_loss(v_f, y2)
task = sample_tasks(num_tasks, K)
inner_losses = vmap(partial(get_loss_for_task, True))(task[0], task[1], task[2], task[3])
loss2 = sum(inner_losses)/len(inner_losses)
result_grads = torch.autograd.grad(loss2, params)
inner_losses = [
get_loss_for_task(False, task[0][i], task[1][i], task[2][i], task[3][i])
for i in range(num_tasks)
]
loss2 = sum(inner_losses)/len(inner_losses)
expected_grads = torch.autograd.grad(loss2, params)
self.assertEqual(result_grads, expected_grads)
def test_maml_omniglot(self, device):
dtype = torch.double
inplace_relu = False
n_way = 5
n_inner_iter = 2
num_tasks = 2
net = nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(64, n_way)).to(device).to(dtype)
fnet, params, buffers = make_functional_with_buffers(net)
net = (params, buffers, fnet)
def loss_for_task(net, n_inner_iter, use_transform, x_spt, y_spt, x_qry, y_qry):
params, buffers, fnet = net
querysz = x_qry.size(0)
def compute_loss(new_params, buffers, x, y):
logits = fnet(new_params, buffers, x)
loss = F.cross_entropy(logits, y)
return loss
new_params = params
for _ in range(n_inner_iter):
if use_transform:
grads = grad(compute_loss)(new_params, buffers, x_spt, y_spt)
else:
res = compute_loss(new_params, buffers, x_spt, y_spt)
grads = torch.autograd.grad(res, new_params, create_graph=True)
new_params = [p - g * 1e-1 for p, g, in zip(new_params, grads)]
qry_logits = fnet(new_params, buffers, x_qry)
qry_loss = F.cross_entropy(qry_logits, y_qry)
qry_acc = (qry_logits.argmax(
dim=1) == y_qry).sum() / querysz
return qry_loss, qry_acc
x_spt = torch.randn(num_tasks, 25, 1, 28, 28, dtype=dtype, device=device)
y_spt = torch.randint(0, 5, (num_tasks, 25), device=device)
x_qry = torch.randn(num_tasks, 75, 1, 28, 28, dtype=dtype, device=device)
y_qry = torch.randint(0, 5, (num_tasks, 75), device=device)
compute_loss = partial(loss_for_task, net, n_inner_iter, True)
qry_losses, _ = vmap(compute_loss)(x_spt, y_spt, x_qry, y_qry)
result_grads = torch.autograd.grad(qry_losses.sum(), params)
compute_loss = partial(loss_for_task, net, n_inner_iter, False)
losses = [compute_loss(x_spt[i], y_spt[i], x_qry[i], y_qry[i])[0]
for i in range(num_tasks)]
expected_grads = torch.autograd.grad(sum(losses), params)
self.assertEqual(result_grads, expected_grads)
@parametrize('originally_track_running_stats', [True, False])
def test_update_batch_norm(self, device, originally_track_running_stats):
dtype = torch.double
inplace_relu = False
classes = 5
num_batches = 2
net = nn.Sequential(
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=originally_track_running_stats),
nn.ReLU(inplace=inplace_relu),
nn.Flatten(),
nn.Linear(43264, classes)).to(device).to(dtype)
replace_all_batch_norm_modules_(net)
transformed_net = net
fnet, params, buffers = make_functional_with_buffers(transformed_net)
net = (params, buffers, fnet)
criterion = nn.CrossEntropyLoss()
def compute_loss(x, y, params, buffers):
return criterion(fnet(params, buffers, x), y)
x = torch.randn(num_batches, 1, 64, 28, 28, device=device, dtype=dtype)
y = torch.randint(0, classes, (num_batches, 1), device=device)
result_grads = vmap(grad(compute_loss, argnums=2), in_dims=(0, 0, None, None))(x, y, params, buffers)
fnet, params, buffers = make_functional_with_buffers(transformed_net)
expected_grads = [
torch.autograd.grad(compute_loss(x[i], y[i], params, buffers), params)
for i in range(num_batches)
]
expected_grads = [torch.stack(shards) for shards in zip(*expected_grads)]
self.assertEqual(result_grads, expected_grads)
def test_lennard_jones_batched_jacrev(self, device):
sigma = 0.5
epsilon = 4.
def lennard_jones(r):
return epsilon * ((sigma / r)**12 - (sigma / r)**6)
def lennard_jones_force(r):
"""Get magnitude of LJ force"""
return \
-epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))
r =
torch | torch.linspace(0.5, 2 * sigma, steps=100, requires_grad=True, device=device) | drs = torch.outer(r, torch.tensor([1.0, 0, 0], device=device)) ⏎ norms = torch.norm(drs, dim=1).reshape(-1, 1) ⏎ training_energies = \ ⏎ torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1) | torch | torch.linspace | torch_direct_api | v_1_10_0
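Illustrative addition, not part of the dataset row above: a minimal sketch of torch.linspace with requires_grad, mirroring how the test builds its radii.
import torch

r = torch.linspace(0.5, 1.0, steps=100, requires_grad=True)
assert r.shape == (100,)
assert torch.isclose(r[0], torch.tensor(0.5)) and torch.isclose(r[-1], torch.tensor(1.0))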
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestExamplesCorrectness(TestCase):
def test_maml_regression(self, device):
class ThreeLayerNet(nn.Module):
def __init__(self):
super(ThreeLayerNet, self).__init__()
self.fc1 = nn.Linear(1, 40)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(40, 40)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(40, 1)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc3(x)
return x
def mse_loss(x, y):
return torch.mean((x - y) ** 2)
net, params = make_functional(ThreeLayerNet().to(device))
K = 20
num_tasks = 4
alpha = 0.1
def sample_tasks(outer_batch_size, inner_batch_size):
As = []
phases = []
for _ in range(outer_batch_size):
As.append(np.random.uniform(low=0.1, high=.5))
phases.append(np.random.uniform(low=0., high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
return torch.tensor(xs, dtype=torch.float, device=device), \
torch.tensor(ys, dtype=torch.float, device=device)
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
def get_loss_for_task(use_transform, x1, y1, x2, y2):
def inner_loss(params, x1, y1):
f = net(params, x1)
loss = mse_loss(f, y1)
return loss
if use_transform:
grads = grad(inner_loss)(params, x1, y1)
else:
loss = inner_loss(params, x1, y1)
grads = torch.autograd.grad(loss, params, create_graph=True)
new_params = [(params[i] - alpha*grads[i]) for i in range(len(params))]
v_f = net(new_params, x2)
return mse_loss(v_f, y2)
task = sample_tasks(num_tasks, K)
inner_losses = vmap(partial(get_loss_for_task, True))(task[0], task[1], task[2], task[3])
loss2 = sum(inner_losses)/len(inner_losses)
result_grads = torch.autograd.grad(loss2, params)
inner_losses = [
get_loss_for_task(False, task[0][i], task[1][i], task[2][i], task[3][i])
for i in range(num_tasks)
]
loss2 = sum(inner_losses)/len(inner_losses)
expected_grads = torch.autograd.grad(loss2, params)
self.assertEqual(result_grads, expected_grads)
def test_maml_omniglot(self, device):
dtype = torch.double
inplace_relu = False
n_way = 5
n_inner_iter = 2
num_tasks = 2
net = nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(64, n_way)).to(device).to(dtype)
fnet, params, buffers = make_functional_with_buffers(net)
net = (params, buffers, fnet)
def loss_for_task(net, n_inner_iter, use_transform, x_spt, y_spt, x_qry, y_qry):
params, buffers, fnet = net
querysz = x_qry.size(0)
def compute_loss(new_params, buffers, x, y):
logits = fnet(new_params, buffers, x)
loss = F.cross_entropy(logits, y)
return loss
new_params = params
for _ in range(n_inner_iter):
if use_transform:
grads = grad(compute_loss)(new_params, buffers, x_spt, y_spt)
else:
res = compute_loss(new_params, buffers, x_spt, y_spt)
grads = torch.autograd.grad(res, new_params, create_graph=True)
new_params = [p - g * 1e-1 for p, g, in zip(new_params, grads)]
qry_logits = fnet(new_params, buffers, x_qry)
qry_loss = F.cross_entropy(qry_logits, y_qry)
qry_acc = (qry_logits.argmax(
dim=1) == y_qry).sum() / querysz
return qry_loss, qry_acc
x_spt = torch.randn(num_tasks, 25, 1, 28, 28, dtype=dtype, device=device)
y_spt = torch.randint(0, 5, (num_tasks, 25), device=device)
x_qry = torch.randn(num_tasks, 75, 1, 28, 28, dtype=dtype, device=device)
y_qry = torch.randint(0, 5, (num_tasks, 75), device=device)
compute_loss = partial(loss_for_task, net, n_inner_iter, True)
qry_losses, _ = vmap(compute_loss)(x_spt, y_spt, x_qry, y_qry)
result_grads = torch.autograd.grad(qry_losses.sum(), params)
compute_loss = partial(loss_for_task, net, n_inner_iter, False)
losses = [compute_loss(x_spt[i], y_spt[i], x_qry[i], y_qry[i])[0]
for i in range(num_tasks)]
expected_grads = torch.autograd.grad(sum(losses), params)
self.assertEqual(result_grads, expected_grads)
@parametrize('originally_track_running_stats', [True, False])
def test_update_batch_norm(self, device, originally_track_running_stats):
dtype = torch.double
inplace_relu = False
classes = 5
num_batches = 2
net = nn.Sequential(
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=originally_track_running_stats),
nn.ReLU(inplace=inplace_relu),
nn.Flatten(),
nn.Linear(43264, classes)).to(device).to(dtype)
replace_all_batch_norm_modules_(net)
transformed_net = net
fnet, params, buffers = make_functional_with_buffers(transformed_net)
net = (params, buffers, fnet)
criterion = nn.CrossEntropyLoss()
def compute_loss(x, y, params, buffers):
return criterion(fnet(params, buffers, x), y)
x = torch.randn(num_batches, 1, 64, 28, 28, device=device, dtype=dtype)
y = torch.randint(0, classes, (num_batches, 1), device=device)
result_grads = vmap(grad(compute_loss, argnums=2), in_dims=(0, 0, None, None))(x, y, params, buffers)
fnet, params, buffers = make_functional_with_buffers(transformed_net)
expected_grads = [
torch.autograd.grad(compute_loss(x[i], y[i], params, buffers), params)
for i in range(num_batches)
]
expected_grads = [torch.stack(shards) for shards in zip(*expected_grads)]
self.assertEqual(result_grads, expected_grads)
def test_lennard_jones_batched_jacrev(self, device):
sigma = 0.5
epsilon = 4.
def lennard_jones(r):
return epsilon * ((sigma / r)**12 - (sigma / r)**6)
def lennard_jones_force(r):
"""Get magnitude of LJ force"""
return \
-epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))
r = torch.linspace(0.5, 2 * sigma, steps=100, requires_grad=True, device=device)
drs =
torch | torch.outer(r, torch.tensor([1.0, 0, 0], device=device)) | norms = torch.norm(drs, dim=1).reshape(-1, 1) ⏎ training_energies = \ ⏎ torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1) ⏎ training_forces = torch.stack( | torch | torch.outer | torch_direct_api | v_1_10_0
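Illustrative addition, not part of the dataset row above: a minimal sketch of torch.outer, which the test uses to turn scalar radii into 3-D displacement vectors.
import torch

r = torch.tensor([1.0, 2.0, 3.0])
direction = torch.tensor([1.0, 0.0, 0.0])
drs = torch.outer(r, direction)          # drs[i, j] = r[i] * direction[j], shape (3, 3)
assert torch.equal(drs[:, 0], r)
assert torch.all(drs[:, 1:] == 0)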
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestExamplesCorrectness(TestCase):
def test_maml_regression(self, device):
class ThreeLayerNet(nn.Module):
def __init__(self):
super(ThreeLayerNet, self).__init__()
self.fc1 = nn.Linear(1, 40)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(40, 40)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(40, 1)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc3(x)
return x
def mse_loss(x, y):
return torch.mean((x - y) ** 2)
net, params = make_functional(ThreeLayerNet().to(device))
K = 20
num_tasks = 4
alpha = 0.1
def sample_tasks(outer_batch_size, inner_batch_size):
As = []
phases = []
for _ in range(outer_batch_size):
As.append(np.random.uniform(low=0.1, high=.5))
phases.append(np.random.uniform(low=0., high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
return torch.tensor(xs, dtype=torch.float, device=device), \
torch.tensor(ys, dtype=torch.float, device=device)
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
def get_loss_for_task(use_transform, x1, y1, x2, y2):
def inner_loss(params, x1, y1):
f = net(params, x1)
loss = mse_loss(f, y1)
return loss
if use_transform:
grads = grad(inner_loss)(params, x1, y1)
else:
loss = inner_loss(params, x1, y1)
grads = torch.autograd.grad(loss, params, create_graph=True)
new_params = [(params[i] - alpha*grads[i]) for i in range(len(params))]
v_f = net(new_params, x2)
return mse_loss(v_f, y2)
task = sample_tasks(num_tasks, K)
inner_losses = vmap(partial(get_loss_for_task, True))(task[0], task[1], task[2], task[3])
loss2 = sum(inner_losses)/len(inner_losses)
result_grads = torch.autograd.grad(loss2, params)
inner_losses = [
get_loss_for_task(False, task[0][i], task[1][i], task[2][i], task[3][i])
for i in range(num_tasks)
]
loss2 = sum(inner_losses)/len(inner_losses)
expected_grads = torch.autograd.grad(loss2, params)
self.assertEqual(result_grads, expected_grads)
def test_maml_omniglot(self, device):
dtype = torch.double
inplace_relu = False
n_way = 5
n_inner_iter = 2
num_tasks = 2
net = nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(64, n_way)).to(device).to(dtype)
fnet, params, buffers = make_functional_with_buffers(net)
net = (params, buffers, fnet)
def loss_for_task(net, n_inner_iter, use_transform, x_spt, y_spt, x_qry, y_qry):
params, buffers, fnet = net
querysz = x_qry.size(0)
def compute_loss(new_params, buffers, x, y):
logits = fnet(new_params, buffers, x)
loss = F.cross_entropy(logits, y)
return loss
new_params = params
for _ in range(n_inner_iter):
if use_transform:
grads = grad(compute_loss)(new_params, buffers, x_spt, y_spt)
else:
res = compute_loss(new_params, buffers, x_spt, y_spt)
grads = torch.autograd.grad(res, new_params, create_graph=True)
new_params = [p - g * 1e-1 for p, g, in zip(new_params, grads)]
qry_logits = fnet(new_params, buffers, x_qry)
qry_loss = F.cross_entropy(qry_logits, y_qry)
qry_acc = (qry_logits.argmax(
dim=1) == y_qry).sum() / querysz
return qry_loss, qry_acc
x_spt = torch.randn(num_tasks, 25, 1, 28, 28, dtype=dtype, device=device)
y_spt = torch.randint(0, 5, (num_tasks, 25), device=device)
x_qry = torch.randn(num_tasks, 75, 1, 28, 28, dtype=dtype, device=device)
y_qry = torch.randint(0, 5, (num_tasks, 75), device=device)
compute_loss = partial(loss_for_task, net, n_inner_iter, True)
qry_losses, _ = vmap(compute_loss)(x_spt, y_spt, x_qry, y_qry)
result_grads = torch.autograd.grad(qry_losses.sum(), params)
compute_loss = partial(loss_for_task, net, n_inner_iter, False)
losses = [compute_loss(x_spt[i], y_spt[i], x_qry[i], y_qry[i])[0]
for i in range(num_tasks)]
expected_grads = torch.autograd.grad(sum(losses), params)
self.assertEqual(result_grads, expected_grads)
@parametrize('originally_track_running_stats', [True, False])
def test_update_batch_norm(self, device, originally_track_running_stats):
dtype = torch.double
inplace_relu = False
classes = 5
num_batches = 2
net = nn.Sequential(
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=originally_track_running_stats),
nn.ReLU(inplace=inplace_relu),
nn.Flatten(),
nn.Linear(43264, classes)).to(device).to(dtype)
replace_all_batch_norm_modules_(net)
transformed_net = net
fnet, params, buffers = make_functional_with_buffers(transformed_net)
net = (params, buffers, fnet)
criterion = nn.CrossEntropyLoss()
def compute_loss(x, y, params, buffers):
return criterion(fnet(params, buffers, x), y)
x = torch.randn(num_batches, 1, 64, 28, 28, device=device, dtype=dtype)
y = torch.randint(0, classes, (num_batches, 1), device=device)
result_grads = vmap(grad(compute_loss, argnums=2), in_dims=(0, 0, None, None))(x, y, params, buffers)
fnet, params, buffers = make_functional_with_buffers(transformed_net)
expected_grads = [
torch.autograd.grad(compute_loss(x[i], y[i], params, buffers), params)
for i in range(num_batches)
]
expected_grads = [torch.stack(shards) for shards in zip(*expected_grads)]
self.assertEqual(result_grads, expected_grads)
def test_lennard_jones_batched_jacrev(self, device):
sigma = 0.5
epsilon = 4.
def lennard_jones(r):
return epsilon * ((sigma / r)**12 - (sigma / r)**6)
def lennard_jones_force(r):
"""Get magnitude of LJ force"""
return \
-epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))
r = torch.linspace(0.5, 2 * sigma, steps=100, requires_grad=True, device=device)
drs = torch.outer(r, torch.tensor([1.0, 0, 0], device=device))
norms =
torch | torch.norm(drs, dim=1).reshape(-1, 1) | training_energies = \ ⏎ torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1) ⏎ training_forces = torch.stack( ⏎ [force * dr | torch | torch.norm.reshape | torch_direct_api | v_1_10_0
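Illustrative addition, not part of the dataset row above: a minimal sketch of the torch.norm(...).reshape(-1, 1) idiom this row records, computing per-row vector norms as a column.
import torch

drs = torch.outer(torch.tensor([1.0, 2.0, 3.0]), torch.tensor([1.0, 0.0, 0.0]))
norms = torch.norm(drs, dim=1).reshape(-1, 1)    # Euclidean norm of each row, as an (N, 1) column
assert norms.shape == (3, 1)
assert torch.allclose(norms.squeeze(1), torch.tensor([1.0, 2.0, 3.0]))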
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestExamplesCorrectness(TestCase):
def test_maml_regression(self, device):
class ThreeLayerNet(nn.Module):
def __init__(self):
super(ThreeLayerNet, self).__init__()
self.fc1 = nn.Linear(1, 40)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(40, 40)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(40, 1)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc3(x)
return x
def mse_loss(x, y):
return torch.mean((x - y) ** 2)
net, params = make_functional(ThreeLayerNet().to(device))
K = 20
num_tasks = 4
alpha = 0.1
def sample_tasks(outer_batch_size, inner_batch_size):
As = []
phases = []
for _ in range(outer_batch_size):
As.append(np.random.uniform(low=0.1, high=.5))
phases.append(np.random.uniform(low=0., high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
return torch.tensor(xs, dtype=torch.float, device=device), \
torch.tensor(ys, dtype=torch.float, device=device)
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
def get_loss_for_task(use_transform, x1, y1, x2, y2):
def inner_loss(params, x1, y1):
f = net(params, x1)
loss = mse_loss(f, y1)
return loss
if use_transform:
grads = grad(inner_loss)(params, x1, y1)
else:
loss = inner_loss(params, x1, y1)
grads = torch.autograd.grad(loss, params, create_graph=True)
new_params = [(params[i] - alpha*grads[i]) for i in range(len(params))]
v_f = net(new_params, x2)
return mse_loss(v_f, y2)
task = sample_tasks(num_tasks, K)
inner_losses = vmap(partial(get_loss_for_task, True))(task[0], task[1], task[2], task[3])
loss2 = sum(inner_losses)/len(inner_losses)
result_grads = torch.autograd.grad(loss2, params)
inner_losses = [
get_loss_for_task(False, task[0][i], task[1][i], task[2][i], task[3][i])
for i in range(num_tasks)
]
loss2 = sum(inner_losses)/len(inner_losses)
expected_grads = torch.autograd.grad(loss2, params)
self.assertEqual(result_grads, expected_grads)
def test_maml_omniglot(self, device):
dtype = torch.double
inplace_relu = False
n_way = 5
n_inner_iter = 2
num_tasks = 2
net = nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(64, n_way)).to(device).to(dtype)
fnet, params, buffers = make_functional_with_buffers(net)
net = (params, buffers, fnet)
def loss_for_task(net, n_inner_iter, use_transform, x_spt, y_spt, x_qry, y_qry):
params, buffers, fnet = net
querysz = x_qry.size(0)
def compute_loss(new_params, buffers, x, y):
logits = fnet(new_params, buffers, x)
loss = F.cross_entropy(logits, y)
return loss
new_params = params
for _ in range(n_inner_iter):
if use_transform:
grads = grad(compute_loss)(new_params, buffers, x_spt, y_spt)
else:
res = compute_loss(new_params, buffers, x_spt, y_spt)
grads = torch.autograd.grad(res, new_params, create_graph=True)
new_params = [p - g * 1e-1 for p, g, in zip(new_params, grads)]
qry_logits = fnet(new_params, buffers, x_qry)
qry_loss = F.cross_entropy(qry_logits, y_qry)
qry_acc = (qry_logits.argmax(
dim=1) == y_qry).sum() / querysz
return qry_loss, qry_acc
x_spt = torch.randn(num_tasks, 25, 1, 28, 28, dtype=dtype, device=device)
y_spt = torch.randint(0, 5, (num_tasks, 25), device=device)
x_qry = torch.randn(num_tasks, 75, 1, 28, 28, dtype=dtype, device=device)
y_qry = torch.randint(0, 5, (num_tasks, 75), device=device)
compute_loss = partial(loss_for_task, net, n_inner_iter, True)
qry_losses, _ = vmap(compute_loss)(x_spt, y_spt, x_qry, y_qry)
result_grads = torch.autograd.grad(qry_losses.sum(), params)
compute_loss = partial(loss_for_task, net, n_inner_iter, False)
losses = [compute_loss(x_spt[i], y_spt[i], x_qry[i], y_qry[i])[0]
for i in range(num_tasks)]
expected_grads = torch.autograd.grad(sum(losses), params)
self.assertEqual(result_grads, expected_grads)
@parametrize('originally_track_running_stats', [True, False])
def test_update_batch_norm(self, device, originally_track_running_stats):
dtype = torch.double
inplace_relu = False
classes = 5
num_batches = 2
net = nn.Sequential(
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=originally_track_running_stats),
nn.ReLU(inplace=inplace_relu),
nn.Flatten(),
nn.Linear(43264, classes)).to(device).to(dtype)
replace_all_batch_norm_modules_(net)
transformed_net = net
fnet, params, buffers = make_functional_with_buffers(transformed_net)
net = (params, buffers, fnet)
criterion = nn.CrossEntropyLoss()
def compute_loss(x, y, params, buffers):
return criterion(fnet(params, buffers, x), y)
x = torch.randn(num_batches, 1, 64, 28, 28, device=device, dtype=dtype)
y = torch.randint(0, classes, (num_batches, 1), device=device)
result_grads = vmap(grad(compute_loss, argnums=2), in_dims=(0, 0, None, None))(x, y, params, buffers)
fnet, params, buffers = make_functional_with_buffers(transformed_net)
expected_grads = [
torch.autograd.grad(compute_loss(x[i], y[i], params, buffers), params)
for i in range(num_batches)
]
expected_grads = [torch.stack(shards) for shards in zip(*expected_grads)]
self.assertEqual(result_grads, expected_grads)
def test_lennard_jones_batched_jacrev(self, device):
sigma = 0.5
epsilon = 4.
def lennard_jones(r):
return epsilon * ((sigma / r)**12 - (sigma / r)**6)
def lennard_jones_force(r):
"""Get magnitude of LJ force"""
return \
-epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))
r = torch.linspace(0.5, 2 * sigma, steps=100, requires_grad=True, device=device)
drs = torch.outer(r, torch.tensor([1.0, 0, 0], device=device))
norms = torch.norm(drs, dim=1).reshape(-1, 1)
training_energies = \
torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1)
training_forces = torch.stack(
[force * dr
for force, dr in zip(map(lennard_jones_force, norms), drs)])
model = nn.Sequential(
nn.Linear(1, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 1)
).to(device)
def make_prediction(model, drs, use_functorch):
norms = torch.norm(drs, dim=1).reshape(-1, 1)
energies = model(norms)
if use_functorch:
network_derivs = vmap(jacrev(model))(norms).squeeze(-1)
forces = -network_derivs * drs / norms
else:
forces = []
for r, dr in zip(norms, drs):
network_deriv = torch.autograd.functional.jacobian(
model, r, create_graph=True)
force = -network_deriv * dr / r
forces.append(force)
forces =
torch | torch.cat(forces) | return energies, forces ⏎ def loss_fn(energies, forces, predicted_energies, predicted_forces): ⏎ return F.mse_loss(energies, predicted_energies) + \ | torch | torch.cat | torch_direct_api | v_1_10_0
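Illustrative addition, not part of the dataset row above: a minimal sketch of torch.cat, which the non-functorch branch above uses to merge per-sample force tensors.
import torch

forces = [torch.randn(1, 3) for _ in range(4)]
stacked = torch.cat(forces)              # concatenate along dim 0 -> shape (4, 3)
assert stacked.shape == (4, 3)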
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestExamplesCorrectness(TestCase):
def test_maml_regression(self, device):
class ThreeLayerNet(nn.Module):
def __init__(self):
super(ThreeLayerNet, self).__init__()
self.fc1 = nn.Linear(1, 40)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(40, 40)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(40, 1)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc3(x)
return x
def mse_loss(x, y):
return torch.mean((x - y) ** 2)
net, params = make_functional(ThreeLayerNet().to(device))
K = 20
num_tasks = 4
alpha = 0.1
def sample_tasks(outer_batch_size, inner_batch_size):
As = []
phases = []
for _ in range(outer_batch_size):
As.append(np.random.uniform(low=0.1, high=.5))
phases.append(np.random.uniform(low=0., high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
return torch.tensor(xs, dtype=torch.float, device=device), \
torch.tensor(ys, dtype=torch.float, device=device)
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
def get_loss_for_task(use_transform, x1, y1, x2, y2):
def inner_loss(params, x1, y1):
f = net(params, x1)
loss = mse_loss(f, y1)
return loss
if use_transform:
grads = grad(inner_loss)(params, x1, y1)
else:
loss = inner_loss(params, x1, y1)
grads = torch.autograd.grad(loss, params, create_graph=True)
new_params = [(params[i] - alpha*grads[i]) for i in range(len(params))]
v_f = net(new_params, x2)
return mse_loss(v_f, y2)
task = sample_tasks(num_tasks, K)
inner_losses = vmap(partial(get_loss_for_task, True))(task[0], task[1], task[2], task[3])
loss2 = sum(inner_losses)/len(inner_losses)
result_grads = torch.autograd.grad(loss2, params)
inner_losses = [
get_loss_for_task(False, task[0][i], task[1][i], task[2][i], task[3][i])
for i in range(num_tasks)
]
loss2 = sum(inner_losses)/len(inner_losses)
expected_grads = torch.autograd.grad(loss2, params)
self.assertEqual(result_grads, expected_grads)
def test_maml_omniglot(self, device):
dtype = torch.double
inplace_relu = False
n_way = 5
n_inner_iter = 2
num_tasks = 2
net = nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(64, n_way)).to(device).to(dtype)
fnet, params, buffers = make_functional_with_buffers(net)
net = (params, buffers, fnet)
def loss_for_task(net, n_inner_iter, use_transform, x_spt, y_spt, x_qry, y_qry):
params, buffers, fnet = net
querysz = x_qry.size(0)
def compute_loss(new_params, buffers, x, y):
logits = fnet(new_params, buffers, x)
loss = F.cross_entropy(logits, y)
return loss
new_params = params
for _ in range(n_inner_iter):
if use_transform:
grads = grad(compute_loss)(new_params, buffers, x_spt, y_spt)
else:
res = compute_loss(new_params, buffers, x_spt, y_spt)
grads = torch.autograd.grad(res, new_params, create_graph=True)
new_params = [p - g * 1e-1 for p, g, in zip(new_params, grads)]
qry_logits = fnet(new_params, buffers, x_qry)
qry_loss = F.cross_entropy(qry_logits, y_qry)
qry_acc = (qry_logits.argmax(
dim=1) == y_qry).sum() / querysz
return qry_loss, qry_acc
x_spt = torch.randn(num_tasks, 25, 1, 28, 28, dtype=dtype, device=device)
y_spt = torch.randint(0, 5, (num_tasks, 25), device=device)
x_qry = torch.randn(num_tasks, 75, 1, 28, 28, dtype=dtype, device=device)
y_qry = torch.randint(0, 5, (num_tasks, 75), device=device)
compute_loss = partial(loss_for_task, net, n_inner_iter, True)
qry_losses, _ = vmap(compute_loss)(x_spt, y_spt, x_qry, y_qry)
result_grads = torch.autograd.grad(qry_losses.sum(), params)
compute_loss = partial(loss_for_task, net, n_inner_iter, False)
losses = [compute_loss(x_spt[i], y_spt[i], x_qry[i], y_qry[i])[0]
for i in range(num_tasks)]
expected_grads = torch.autograd.grad(sum(losses), params)
self.assertEqual(result_grads, expected_grads)
@parametrize('originally_track_running_stats', [True, False])
def test_update_batch_norm(self, device, originally_track_running_stats):
dtype = torch.double
inplace_relu = False
classes = 5
num_batches = 2
net = nn.Sequential(
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=originally_track_running_stats),
nn.ReLU(inplace=inplace_relu),
nn.Flatten(),
nn.Linear(43264, classes)).to(device).to(dtype)
replace_all_batch_norm_modules_(net)
transformed_net = net
fnet, params, buffers = make_functional_with_buffers(transformed_net)
net = (params, buffers, fnet)
criterion = nn.CrossEntropyLoss()
def compute_loss(x, y, params, buffers):
return criterion(fnet(params, buffers, x), y)
x = torch.randn(num_batches, 1, 64, 28, 28, device=device, dtype=dtype)
y = torch.randint(0, classes, (num_batches, 1), device=device)
result_grads = vmap(grad(compute_loss, argnums=2), in_dims=(0, 0, None, None))(x, y, params, buffers)
fnet, params, buffers = make_functional_with_buffers(transformed_net)
expected_grads = [
torch.autograd.grad(compute_loss(x[i], y[i], params, buffers), params)
for i in range(num_batches)
]
expected_grads = [torch.stack(shards) for shards in zip(*expected_grads)]
self.assertEqual(result_grads, expected_grads)
def test_lennard_jones_batched_jacrev(self, device):
sigma = 0.5
epsilon = 4.
def lennard_jones(r):
return epsilon * ((sigma / r)**12 - (sigma / r)**6)
def lennard_jones_force(r):
"""Get magnitude of LJ force"""
return \
-epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))
r = torch.linspace(0.5, 2 * sigma, steps=100, requires_grad=True, device=device)
drs = torch.outer(r, torch.tensor([1.0, 0, 0], device=device))
norms = torch.norm(drs, dim=1).reshape(-1, 1)
training_energies = \
torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1)
training_forces = torch.stack(
[force * dr
for force, dr in zip(map(lennard_jones_force, norms), drs)])
model = nn.Sequential(
nn.Linear(1, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 1)
).to(device)
def make_prediction(model, drs, use_functorch):
norms = torch.norm(drs, dim=1).reshape(-1, 1)
energies = model(norms)
if use_functorch:
network_derivs = vmap(jacrev(model))(norms).squeeze(-1)
forces = -network_derivs * drs / norms
else:
forces = []
for r, dr in zip(norms, drs):
network_deriv = torch.autograd.functional.jacobian(
model, r, create_graph=True)
force = -network_deriv * dr / r
forces.append(force)
forces = torch.cat(forces)
return energies, forces
def loss_fn(energies, forces, predicted_energies, predicted_forces):
return F.mse_loss(energies, predicted_energies) + \
0.01 * F.mse_loss(forces, predicted_forces) / 3
energies, forces = make_prediction(model, drs, use_functorch=True)
loss = loss_fn(training_energies, training_forces, energies, forces)
result = torch.autograd.grad(loss, model.parameters())
energies, forces = make_prediction(model, drs, use_functorch=False)
loss = loss_fn(training_energies, training_forces, energies, forces)
expected = torch.autograd.grad(loss, model.parameters())
self.assertEqual(result, expected)
def test_ensemble_regression(self, device):
def make_spirals(n_samples, noise_std=0., rotations=1.):
ts = torch.linspace(0, 1, n_samples)
rs = ts ** 0.5
thetas = rs * rotations * 2 * math.pi
signs = torch.randint(0, 2, (n_samples,)) * 2 - 1
labels = (signs > 0).to(torch.long)
xs = rs * signs * torch.cos(thetas) + torch.randn(n_samples) * noise_std
ys = rs * signs * torch.sin(thetas) + torch.randn(n_samples) * noise_std
points = torch.stack([xs, ys], dim=1)
return points.to(device), labels.to(device)
points, labels = make_spirals(100, noise_std=0.05)
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
loss_fn =
|
nn
|
nn.NLLLoss()
|
func_model, weights = make_functional(MLPClassifier().to(device))
def train_step_fn(use_transform, weights, batch, targets, lr=0.2):
|
torch
|
torch.nn.NLLLoss
|
torch_direct_api
|
v_1_10_0
|
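A minimal usage sketch of torch.nn.NLLLoss, the target API in the row above; the batch size, class count, and random inputs are illustrative assumptions, not taken from the dataset.

import torch
import torch.nn as nn
import torch.nn.functional as F

loss_fn = nn.NLLLoss()
logits = torch.randn(8, 5)                 # assumed: batch of 8 samples, 5 classes
log_probs = F.log_softmax(logits, dim=-1)  # NLLLoss expects log-probabilities
targets = torch.randint(0, 5, (8,))        # integer class indices in [0, 5)
loss = loss_fn(log_probs, targets)         # scalar mean negative log-likelihood
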
from torch.testing._internal.common_utils import (
    TestCase, run_tests, parametrize, subtest)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestExamplesCorrectness(TestCase):
def test_maml_regression(self, device):
class ThreeLayerNet(nn.Module):
def __init__(self):
super(ThreeLayerNet, self).__init__()
self.fc1 = nn.Linear(1, 40)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(40, 40)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(40, 1)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc3(x)
return x
def mse_loss(x, y):
return torch.mean((x - y) ** 2)
net, params = make_functional(ThreeLayerNet().to(device))
K = 20
num_tasks = 4
alpha = 0.1
def sample_tasks(outer_batch_size, inner_batch_size):
As = []
phases = []
for _ in range(outer_batch_size):
As.append(np.random.uniform(low=0.1, high=.5))
phases.append(np.random.uniform(low=0., high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
return torch.tensor(xs, dtype=torch.float, device=device), \
torch.tensor(ys, dtype=torch.float, device=device)
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
def get_loss_for_task(use_transform, x1, y1, x2, y2):
def inner_loss(params, x1, y1):
f = net(params, x1)
loss = mse_loss(f, y1)
return loss
if use_transform:
grads = grad(inner_loss)(params, x1, y1)
else:
loss = inner_loss(params, x1, y1)
grads = torch.autograd.grad(loss, params, create_graph=True)
new_params = [(params[i] - alpha*grads[i]) for i in range(len(params))]
v_f = net(new_params, x2)
return mse_loss(v_f, y2)
task = sample_tasks(num_tasks, K)
inner_losses = vmap(partial(get_loss_for_task, True))(task[0], task[1], task[2], task[3])
loss2 = sum(inner_losses)/len(inner_losses)
result_grads = torch.autograd.grad(loss2, params)
inner_losses = [
get_loss_for_task(False, task[0][i], task[1][i], task[2][i], task[3][i])
for i in range(num_tasks)
]
loss2 = sum(inner_losses)/len(inner_losses)
expected_grads = torch.autograd.grad(loss2, params)
self.assertEqual(result_grads, expected_grads)
def test_maml_omniglot(self, device):
dtype = torch.double
inplace_relu = False
n_way = 5
n_inner_iter = 2
num_tasks = 2
net = nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(64, n_way)).to(device).to(dtype)
fnet, params, buffers = make_functional_with_buffers(net)
net = (params, buffers, fnet)
def loss_for_task(net, n_inner_iter, use_transform, x_spt, y_spt, x_qry, y_qry):
params, buffers, fnet = net
querysz = x_qry.size(0)
def compute_loss(new_params, buffers, x, y):
logits = fnet(new_params, buffers, x)
loss = F.cross_entropy(logits, y)
return loss
new_params = params
for _ in range(n_inner_iter):
if use_transform:
grads = grad(compute_loss)(new_params, buffers, x_spt, y_spt)
else:
res = compute_loss(new_params, buffers, x_spt, y_spt)
grads = torch.autograd.grad(res, new_params, create_graph=True)
                new_params = [p - g * 1e-1 for p, g in zip(new_params, grads)]
qry_logits = fnet(new_params, buffers, x_qry)
qry_loss = F.cross_entropy(qry_logits, y_qry)
qry_acc = (qry_logits.argmax(
dim=1) == y_qry).sum() / querysz
return qry_loss, qry_acc
x_spt = torch.randn(num_tasks, 25, 1, 28, 28, dtype=dtype, device=device)
y_spt = torch.randint(0, 5, (num_tasks, 25), device=device)
x_qry = torch.randn(num_tasks, 75, 1, 28, 28, dtype=dtype, device=device)
y_qry = torch.randint(0, 5, (num_tasks, 75), device=device)
compute_loss = partial(loss_for_task, net, n_inner_iter, True)
qry_losses, _ = vmap(compute_loss)(x_spt, y_spt, x_qry, y_qry)
result_grads = torch.autograd.grad(qry_losses.sum(), params)
compute_loss = partial(loss_for_task, net, n_inner_iter, False)
losses = [compute_loss(x_spt[i], y_spt[i], x_qry[i], y_qry[i])[0]
for i in range(num_tasks)]
expected_grads = torch.autograd.grad(sum(losses), params)
self.assertEqual(result_grads, expected_grads)
@parametrize('originally_track_running_stats', [True, False])
def test_update_batch_norm(self, device, originally_track_running_stats):
dtype = torch.double
inplace_relu = False
classes = 5
num_batches = 2
net = nn.Sequential(
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=originally_track_running_stats),
nn.ReLU(inplace=inplace_relu),
nn.Flatten(),
nn.Linear(43264, classes)).to(device).to(dtype)
replace_all_batch_norm_modules_(net)
transformed_net = net
fnet, params, buffers = make_functional_with_buffers(transformed_net)
net = (params, buffers, fnet)
criterion = nn.CrossEntropyLoss()
def compute_loss(x, y, params, buffers):
return criterion(fnet(params, buffers, x), y)
x = torch.randn(num_batches, 1, 64, 28, 28, device=device, dtype=dtype)
y = torch.randint(0, classes, (num_batches, 1), device=device)
result_grads = vmap(grad(compute_loss, argnums=2), in_dims=(0, 0, None, None))(x, y, params, buffers)
fnet, params, buffers = make_functional_with_buffers(transformed_net)
expected_grads = [
torch.autograd.grad(compute_loss(x[i], y[i], params, buffers), params)
for i in range(num_batches)
]
expected_grads = [torch.stack(shards) for shards in zip(*expected_grads)]
self.assertEqual(result_grads, expected_grads)
def test_lennard_jones_batched_jacrev(self, device):
sigma = 0.5
epsilon = 4.
def lennard_jones(r):
return epsilon * ((sigma / r)**12 - (sigma / r)**6)
def lennard_jones_force(r):
"""Get magnitude of LJ force"""
return \
-epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))
r = torch.linspace(0.5, 2 * sigma, steps=100, requires_grad=True, device=device)
drs = torch.outer(r, torch.tensor([1.0, 0, 0], device=device))
norms = torch.norm(drs, dim=1).reshape(-1, 1)
training_energies = \
torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1)
training_forces = torch.stack(
[force * dr
for force, dr in zip(map(lennard_jones_force, norms), drs)])
model = nn.Sequential(
nn.Linear(1, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 1)
).to(device)
def make_prediction(model, drs, use_functorch):
norms = torch.norm(drs, dim=1).reshape(-1, 1)
energies = model(norms)
if use_functorch:
network_derivs = vmap(jacrev(model))(norms).squeeze(-1)
forces = -network_derivs * drs / norms
else:
forces = []
for r, dr in zip(norms, drs):
network_deriv = torch.autograd.functional.jacobian(
model, r, create_graph=True)
force = -network_deriv * dr / r
forces.append(force)
forces = torch.cat(forces)
return energies, forces
def loss_fn(energies, forces, predicted_energies, predicted_forces):
return F.mse_loss(energies, predicted_energies) + \
0.01 * F.mse_loss(forces, predicted_forces) / 3
energies, forces = make_prediction(model, drs, use_functorch=True)
loss = loss_fn(training_energies, training_forces, energies, forces)
result = torch.autograd.grad(loss, model.parameters())
energies, forces = make_prediction(model, drs, use_functorch=False)
loss = loss_fn(training_energies, training_forces, energies, forces)
expected = torch.autograd.grad(loss, model.parameters())
self.assertEqual(result, expected)
def test_ensemble_regression(self, device):
def make_spirals(n_samples, noise_std=0., rotations=1.):
ts = torch.linspace(0, 1, n_samples)
rs = ts ** 0.5
thetas = rs * rotations * 2 * math.pi
signs = torch.randint(0, 2, (n_samples,)) * 2 - 1
labels = (signs > 0).to(torch.long)
xs = rs * signs * torch.cos(thetas) + torch.randn(n_samples) * noise_std
ys = rs * signs * torch.sin(thetas) + torch.randn(n_samples) * noise_std
points = torch.stack([xs, ys], dim=1)
return points.to(device), labels.to(device)
points, labels = make_spirals(100, noise_std=0.05)
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
loss_fn = nn.NLLLoss()
func_model, weights = make_functional(MLPClassifier().to(device))
def train_step_fn(use_transform, weights, batch, targets, lr=0.2):
def compute_loss(weights, batch, targets):
output = func_model(weights, batch)
loss = loss_fn(output, targets)
return loss
if use_transform:
grad_weights, loss = grad_and_value(compute_loss)(weights, batch, targets)
else:
loss = compute_loss(weights, batch, targets)
grad_weights = torch.autograd.grad(loss, weights)
new_weights = []
with torch.no_grad():
for grad_weight, weight in zip(grad_weights, weights):
new_weights.append(weight - grad_weight * lr)
return (loss, *new_weights)
def unpack(train_result):
return train_result[0], train_result[1:]
def init_fn(num_models):
models = tuple(MLPClassifier().to(device) for _ in range(num_models))
weights = tuple(make_functional(model)[1] for model in models)
weights = tuple(zip(*weights))
weights = tuple(torch.stack(shards).detach() for shards in weights)
return weights
def slice_weights(batched_weights, index):
return tuple(weight[index].detach().requires_grad_() for weight in batched_weights)
batched_weights = init_fn(num_models=2)
parallel_train_step_fn = vmap(partial(train_step_fn, True), in_dims=(0, None, None))
result_loss, result_weights = unpack(parallel_train_step_fn(batched_weights, points, labels))
loss0, weights0 = unpack(train_step_fn(False, slice_weights(batched_weights, 0), points, labels))
loss1, weights1 = unpack(train_step_fn(False, slice_weights(batched_weights, 1), points, labels))
expected_loss = torch.stack([loss0, loss1])
expected_weights = tuple(torch.stack([w0, w1]) for w0, w1 in zip(weights0, weights1))
self.assertEqual(result_loss, expected_loss)
self.assertEqual(result_weights, expected_weights)
@parametrize("dropout_layer", [nn.Dropout, nn.AlphaDropout, nn.FeatureAlphaDropout])
def test_find_learning_rate_ensembling(self, device, dropout_layer):
points, labels = torch.randn(100, 2, 2, 2, 2, device=device), torch.randint(0, 2, (100,), device=device)
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.dropout = dropout_layer()
self.fc1 = nn.Linear(16, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.dropout(x)
x =
|
torch
|
torch.flatten(x, start_dim=1)
|
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
|
torch
|
torch.flatten
|
torch_direct_api
|
v_1_10_0
|
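A minimal sketch of torch.flatten(x, start_dim=1), the target API in the row above; the 5-D input mirrors the test's (100, 2, 2, 2, 2) points tensor, so flattening everything after the batch dimension yields 16 features per sample.

import torch

x = torch.randn(100, 2, 2, 2, 2)       # shape taken from the test's points tensor
flat = torch.flatten(x, start_dim=1)   # keep dim 0, flatten the rest -> shape (100, 16)
assert flat.shape == (100, 16)
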
from torch.testing._internal.common_utils import TestCase, run_tests, is_iterable_of_tensors
import torch
from torch import Tensor
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_dtype import integral_types
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
from torch.utils._pytree import tree_flatten, tree_unflatten, tree_map
import torch.autograd.forward_ad as fwAD
def _autograd_grad(outputs, inputs, grad_outputs=None, retain_graph=False, create_graph=True):
inputs, inputs_spec =
|
tree
|
tree_flatten(inputs)
|
result = [torch.zeros_like(inp) for inp in inputs]
diff_argnums = tuple(i for i, inp in enumerate(inputs) if inp.requires_grad)
inputs = tuple(inputs[i] for i in diff_argnums)
if grad_outputs is None:
|
torch
|
torch.utils._pytree.tree_flatten
|
torch_direct_api
|
v_1_10_0
|
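A minimal sketch of torch.utils._pytree.tree_flatten, the target API in the row above. It lives in a private (underscore) module, so the interface may change; the example pytree here is an assumption.

import torch
from torch.utils._pytree import tree_flatten, tree_unflatten

inputs = {"a": torch.zeros(2), "b": (torch.ones(3), torch.ones(1))}  # an arbitrary nested structure
leaves, spec = tree_flatten(inputs)      # leaves: flat list of tensors; spec: the nesting structure
rebuilt = tree_unflatten(leaves, spec)   # round-trips back to the original dict/tuple layout
assert isinstance(rebuilt, dict) and torch.equal(rebuilt["a"], inputs["a"])
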
from torch.testing._internal.common_utils import TestCase, run_tests, is_iterable_of_tensors
import torch
from torch import Tensor
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_dtype import integral_types
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
from torch.utils._pytree import tree_flatten, tree_unflatten, tree_map
import torch.autograd.forward_ad as fwAD
def normalize_op_input_output2(f, args, kwargs, output_process_fn_grad=None, requires_grad=True):
flat_args, args_spec = tree_flatten(args)
diff_argnums = tuple(i for i, arg in enumerate(flat_args) if diff_arg(arg, requires_grad=requires_grad))
assert len(diff_argnums) > 0
primals = tuple(flat_args[i] for i in diff_argnums)
@functools.wraps(f)
def wrapped(*primals):
_args = list(flat_args)
for num, arg in zip(diff_argnums, primals):
_args[num] = arg
_args =
|
tree
|
tree_unflatten(_args, args_spec)
|
result = f(*_args, **kwargs)
if output_process_fn_grad is not None:
result = output_process_fn_grad(result)
if isinstance(result, tuple):
|
torch
|
torch.utils._pytree.tree_unflatten
|
torch_direct_api
|
v_1_10_0
|
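A minimal sketch of torch.utils._pytree.tree_unflatten, the target API in the row above, used the same way as inside the wrapped function: rebuild the original nesting from a modified flat list. The example values are assumptions, and the module is private, so treat the interface as unstable.

import torch
from torch.utils._pytree import tree_flatten, tree_unflatten

args = (torch.randn(2), {"w": torch.randn(3)})    # an arbitrary nested argument structure
flat_args, args_spec = tree_flatten(args)
new_flat = [t * 0 for t in flat_args]             # replace every leaf (illustrative)
new_args = tree_unflatten(new_flat, args_spec)    # same nesting as `args`, holding the new leaves
assert isinstance(new_args, tuple) and isinstance(new_args[1], dict)
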
from torch.testing._internal.common_utils import TestCase, run_tests, is_iterable_of_tensors
import torch
from torch import Tensor
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_dtype import integral_types
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
from torch.utils._pytree import tree_flatten, tree_unflatten, tree_map
import torch.autograd.forward_ad as fwAD
class TestDecompositionOpInfo(TestCase):
@unittest.skipIf(IS_FBCODE, "__torch_dispatch__ is buggy")
@ops(
functorch_lagging_op_db + additional_op_db,
allowed_dtypes=[torch.float32, torch.float64, torch.float16, torch.bfloat16] + [*integral_types()]
)
@skipOps('TestDecompositionOpInfo', 'test_decomposition', {
skip('view_as_complex'),
xfail('linalg.cholesky'),
xfail('linalg.inv'),
skip('linalg.det', 'singular', device_type='cuda'), # this is nasty and seems to stop the test suite
xfail('linalg.matrix_power'),
xfail('linalg.tensorinv'),
xfail('to_sparse'),
skip('tensor_split'),
skip('mvlgamma'),
skip('eig'),
skip('nn.functional.dropout'),
skip('_masked.softmin'),
skip('_masked.log_softmax'),
skip('stft'),
skip('_masked.softmax'),
skip('_masked.normalize'),
skip('resize_'),
})
def test_decomposition(self, device, dtype, op):
TEST_DTYPE = dtype
dtype_precisions = {
torch.float16: (0.001, 1e-5),
torch.bfloat16: (0.016, 1e-4),
torch.float32: (1.3e-6, 1e-5),
torch.float64: (1e-7, 1e-7),
torch.complex64: (1.3e-6, 1e-5),
torch.complex128: (1e-7, 1e-7),
}
def _getDefaultRtolAndAtol(dtype0, dtype1):
rtol = max(dtype_precisions.get(dtype0, (0, 0))[0],
dtype_precisions.get(dtype1, (0, 0))[0])
atol = max(dtype_precisions.get(dtype0, (0, 0))[1],
dtype_precisions.get(dtype1, (0, 0))[1])
return rtol, atol
def op_assert_ref(op, orig, decomp, ref):
if orig.numel() == 0:
return
orig_diff = (orig - ref).abs().max()
decomp_diff = (decomp - ref).abs().max()
atol = 1e-10
if decomp_diff > orig_diff + atol:
msg = (f"Difference from float64 is larger with decomposition {op.__name__}" +
f" than original. Original max diff: {orig_diff}, Decomp max diff: {decomp_diff}")
raise RuntimeError(msg)
def op_assert_equal(op, a, b):
assert a.dtype == b.dtype
rtol, atol = _getDefaultRtolAndAtol(a.dtype, b.dtype)
if not torch.allclose(a, b, rtol=rtol, atol=atol):
atol_diff = (a - b).abs().max()
rtol_diff = ((a - b).abs()/b.abs()).nan_to_num(0).max()
msg = f"{op.__name__} decomposition failed, max rel: {rtol_diff}, max abs: {atol_diff}"
raise RuntimeError(msg)
class DecompositionTensor(torch.Tensor):
elem: torch.Tensor
__slots__ = ['elem']
@staticmethod
def __new__(cls, elem):
r = torch.Tensor._make_wrapper_subclass(
cls, elem.size(),
strides=elem.stride(), storage_offset=elem.storage_offset(),
dtype=elem.dtype, layout=elem.layout,
device=elem.device, requires_grad=elem.requires_grad
)
r.elem = elem
return r
def __repr__(self):
return f"DecompositionTensor(elem={self.elem})"
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
global run_ops
run_ops.add(func)
def unwrap_tensor(e):
if isinstance(e, DecompositionTensor):
if not hasattr(e, 'elem'):
raise InplaceError()
return e.elem
return e
if func in decomposition_table and func not in [torch.ops.aten.detach, torch.ops.aten._s_where]:
dtype_arg_table = set([
aten._softmax_backward_data,
aten._log_softmax_backward_data,
])
decomposition = decomposition_table[func]
global run_decompositions
run_decompositions.add(func)
def upcast_tensor(x, dtype=torch.float32):
if isinstance(x, Tensor) and (x.dtype == torch.bfloat16 or x.dtype == torch.float16):
x = x.to(dtype=dtype)
FLOAT16_DTYPE = 5
BFLOAT16_DTYPE = 15
FLOAT64_DTYPE = 7
if isinstance(x, int) and func in dtype_arg_table and x in [FLOAT16_DTYPE, BFLOAT16_DTYPE]:
x = FLOAT64_DTYPE
return x
def call_op(func, map_fn, *args, **kwargs):
return tree_flatten(func(*tree_map(map_fn, args), **tree_map(map_fn, kwargs)))[0]
if TEST_DTYPE in [torch.float16, torch.bfloat16]:
decomp_out = call_op(decomposition, upcast_tensor, *args, **kwargs)
else:
decomp_out = call_op(decomposition, lambda x: x, *args, **kwargs)
real_out_double = call_op(func, lambda x: upcast_tensor(unwrap_tensor(x), dtype=torch.float64),
*args, **kwargs)
real_out = call_op(func, unwrap_tensor, *args, **kwargs)
assert(len(real_out) == len(decomp_out))
for orig, decomp, ref in zip(real_out, decomp_out, real_out_double):
orig = orig.to(dtype=TEST_DTYPE)
decomp = decomp.to(dtype=TEST_DTYPE)
if TEST_DTYPE in [torch.float16, torch.bfloat16]:
op_assert_ref(func, orig, decomp, ref)
else:
op_assert_equal(func, orig, decomp)
real_out = func(*tree_map(unwrap_tensor, args), **tree_map(unwrap_tensor, kwargs))
def wrap_tensor(e):
if e is None:
return DecompositionTensor(torch.empty(()))
return DecompositionTensor(e) if type(e) == torch.Tensor else e
wrapped_out = tree_map(wrap_tensor, real_out)
return wrapped_out
if TEST_DTYPE not in op.supported_dtypes(self.device_type):
self.skipTest("Dtype not in op's supported dtypes")
return
if is_inplace(op, op.get_op()):
self.skipTest("op is inplace")
return
_requires_grad = op.supports_autograd and TEST_DTYPE.is_floating_point
samples = op.sample_inputs(device, TEST_DTYPE, requires_grad=_requires_grad)
def wrap_tensor(x):
if type(x) == torch.Tensor:
return DecompositionTensor(x)
return x
try:
func = op.get_op()
for sample_input in samples:
if _requires_grad:
fn, primals = normalize_op_input_output(func, sample_input)
primals = tree_map(lambda x: x if isinstance(x, torch.Tensor) else x, primals)
decomp_out, decomp_vjp_fn = ref_vjp_no_create(fn, *tree_map(wrap_tensor, primals))
cotangents = tree_map(lambda x: torch.randn_like(x), decomp_out)
_ = decomp_vjp_fn(cotangents)
else:
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
_ = func(*args, **kwargs)
args = tree_map(wrap_tensor, args)
kwargs = tree_map(wrap_tensor, kwargs)
decomp_out = func(*args, **kwargs)
except InplaceError:
self.skipTest("op is inplace")
return
except RuntimeError as e:
if "not implemented for" in str(e):
self.skipTest(str(e))
return
if "Mismatch in shape: grad_output" in str(e):
self.skipTest("Some weird issue with autograd engine and tensor subclasses")
return
raise e
@unittest.skipIf(IS_FBCODE, "__torch_dispatch__ is buggy")
def test_placeholder(self):
global run_ops, run_decompositions
with open('op_analysis/run_ops.txt', 'w') as f:
def get_names(inpt):
return sorted([x.__name__ for x in inpt])
for op in get_names(run_ops):
f.write(f'{op}\n')
with open('op_analysis/run_decompositions.txt', 'w') as f:
for op in get_names(run_decompositions):
f.write(f'{op}\n')
def test_group_norm_backward(self, device):
def f(x, weight, bias, grad_out):
output =
|
F
|
F.group_norm(x, 6, weight, bias)
|
inputs = []
for input in (x, weight, bias):
if input.requires_grad:
inputs.append(input)
|
torch
|
torch.nn.functional.group_norm
|
torch_direct_api
|
v_1_10_0
|
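A minimal sketch of torch.nn.functional.group_norm, mirroring the call in the target above (6 groups with per-channel affine parameters); the input size is an assumption.

import torch
import torch.nn.functional as F

x = torch.randn(4, 6, 10, 10)           # (N, C, H, W); C must be divisible by num_groups
weight = torch.ones(6)                  # per-channel scale (gamma)
bias = torch.zeros(6)                   # per-channel shift (beta)
out = F.group_norm(x, 6, weight, bias)  # 6 groups over 6 channels, i.e. one channel per group
assert out.shape == x.shape
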
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class EnableVmapFallbackWarnings:
def __enter__(self):
self.prev_state =
|
torch
|
torch._C._debug_only_are_vmap_fallback_warnings_enabled()
|
torch._C._debug_only_display_vmap_fallback_warnings(True)
def __exit__(self, *ignored):
torch._C._debug_only_display_vmap_fallback_warnings(self.prev_state)
|
torch
|
torch._C._debug_only_are_vmap_fallback_warnings_enabled
|
torch_direct_api
|
v_1_10_0
|
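A minimal sketch of the save/flip/restore pattern used by EnableVmapFallbackWarnings above. Both calls are private torch._C debug hooks present in this PyTorch version; their names are internal and may change.

import torch

prev = torch._C._debug_only_are_vmap_fallback_warnings_enabled()  # remember the current setting
torch._C._debug_only_display_vmap_fallback_warnings(True)         # turn fallback warnings on
try:
    pass  # run vmap-ed code here; ops without batching rules will now warn
finally:
    torch._C._debug_only_display_vmap_fallback_warnings(prev)     # restore the saved setting
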
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapAPI(TestCase):
def test_non_tensor_output_raises(self):
with self.assertRaisesRegex(ValueError, "got type <class 'float'> as a return"):
vmap(lambda x: 3.14)(torch.ones(3))
def multiple_outputs(x):
return x, 3
with self.assertRaisesRegex(ValueError, "got type <class 'int'> as a return"):
vmap(multiple_outputs)(torch.ones(3))
def test_different_map_dim_size_raises(self):
x = torch.randn(2)
y = torch.randn(3)
expected_msg = 'Expected all tensors to have the same size in the mapped dimension'
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(torch.mul)(x, y)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
def test_func_with_no_inputs(self):
expected_msg = 'got no inputs'
def foo():
return torch.randn(3)
def bar(x):
return torch.randn(3)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(foo)()
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(bar)()
def test_constant_function(self):
output = vmap(lambda x: torch.tensor(3.14))(torch.ones(3))
self.assertEqual(output, torch.tensor([3.14, 3.14, 3.14]))
def test_single_input(self):
x = torch.randn(2, 3)
def square(x):
return x * x
output = vmap(square)(x)
self.assertEqual(output, x * x)
def test_multiple_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul)(x, y)
self.assertEqual(output, x * y)
def test_multiple_outputs(self):
def foo(x):
return x * x, x * x * x
x = torch.randn(3)
outputs = vmap(foo)(x)
self.assertEqual(outputs[0], x * x)
self.assertEqual(outputs[1], x * x * x)
def test_multiple_outputs2(self):
def returns_tuple_of_tensors(x):
return (x, x)
def returns_list_of_two_tensors(x):
return [x, x]
def returns_list_of_one_tensor(x):
return [x]
x = torch.randn(3)
vmap(returns_tuple_of_tensors)(x)
vmap(returns_list_of_two_tensors)(x)
vmap(returns_list_of_one_tensor)(x)
def test_nested_with_same_map_dim(self):
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
output = vmap(vmap(torch.mul))(x, y)
self.assertEqual(output, x * y)
output = vmap(vmap(vmap(torch.mul)))(x, y)
self.assertEqual(output, x * y)
def test_nested_with_diag_embed(self):
x = torch.randn(3, 3, 5)
output = vmap(vmap(torch.diag_embed))(x)
self.assertEqual(output, torch.diag_embed(x))
def test_nested_with_different_map_dim(self):
x = torch.randn(2, 3)
y = torch.randn(5, 3)
output = vmap(lambda x: vmap(lambda y: x * y)(y))(x)
self.assertEqual(output.shape, (2, 5, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
z = torch.randn(7, 3)
output = vmap(lambda x: vmap(lambda y: vmap(lambda z: x * y * z)(z))(y))(x)
self.assertEqual(output.shape, (2, 5, 7, 3))
self.assertEqual(output, x.view(2, 1, 1, 3) * y.view(5, 1, 3) * z)
def test_noop_in_inner_vmap(self):
x = torch.randn(3)
y = torch.randn(5)
output = vmap(lambda x: vmap(lambda y: x)(y))(x)
self.assertEqual(output, x.view(3, 1).expand(3, 5))
def test_unsupported_op_err_msg(self):
tensor = torch.randn(2, 3)
msg = (
r"Batching rule not implemented for aten::.+; the "
r"fallback path doesn't work on out= or view ops"
)
def out_op(x, y):
return torch.abs(x, out=y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(out_op)(tensor, tensor)
with self.assertRaisesRegex(RuntimeError, 'Batching rule not implemented'):
vmap(torch.equal)(tensor, tensor)
def test_nonzero_out_dims(self):
tensor = torch.randn(2, 3)
result = vmap(lambda x: x, out_dims=1)(tensor)
self.assertEqual(result, tensor.permute(1, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 0, 3))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=-1)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 3, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
other = torch.randn(2, 3, 5, 7)
result = vmap(lambda x, y: (x, y), out_dims=2)(tensor, other)
self.assertEqual(result, (tensor.permute(1, 2, 0, 3), other.permute(1, 2, 0, 3)))
ndims = 64
shape = [2] + [1] * (ndims - 1)
expected_shape = [1, 1, 2] + [1] * (ndims - 3)
tensor = torch.randn(shape)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result.shape, expected_shape)
def foo(x, y):
return x, x * y, x * y * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=1)(x, y)
self.assertEqual(
result,
(x.permute(1, 0, 2), (x * y).permute(1, 0, 2), (x * y * y).permute(1, 0, 2)))
def test_multiple_out_dims(self):
def foo(x):
return x, x
def bar(x, y):
return x, x, x, x * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=(0, 1))(x)
self.assertEqual(result, (x, x.permute(1, 0, 2)))
result = vmap(bar, out_dims=(-1, 0, 1, 2))(x, y)
expected = (
x.permute(1, 2, 0),
x,
x.permute(1, 0, 2),
(x * y).permute(1, 2, 0),
)
self.assertEqual(result, expected)
def test_nested_out_dims(self):
y = torch.randn(2, 3, 5, 7)
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y))(y)
self.assertEqual(result.shape, (2, 5, 3, 7))
self.assertEqual(result, y.permute(0, 2, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y), out_dims=1)(y)
self.assertEqual(result.shape, (5, 2, 3, 7))
self.assertEqual(result, y.permute(2, 0, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=-1)(y), out_dims=-1)(y)
self.assertEqual(result.shape, (5, 7, 3, 2))
self.assertEqual(result, y.permute(2, 3, 1, 0))
x = torch.randn(2, 3)
y = torch.randn(5, 3)
result = vmap(lambda y: vmap(lambda x: x * y, out_dims=1)(x), out_dims=-1)(y)
self.assertEqual(result.shape, (3, 2, 5))
self.assertEqual(result, (y.view(5, 1, 3) * x).permute(2, 1, 0))
def test_out_dims_edge_case(self):
def foo(x):
return x
tensor = torch.randn(2, 3)
expected = vmap(foo, out_dims=1)(tensor)
result = vmap(foo, out_dims=(1,))(tensor)
self.assertEqual(result, expected)
def test_pytree_returns(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y), [y, (y, y)]
y0, (y1, y2), (y3, (y4, y5)) = vmap(f)(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y0, y1)
self.assertEqual(y2, y1)
self.assertEqual(y2, y3)
self.assertEqual(y4, y3)
self.assertEqual(y5, y4)
def test_pytree_odict_returns(self):
x = torch.randn(2, 3)
def f(t):
y = t.sin()
return OrderedDict([("sin", y), ("cos", t.cos())])
out = vmap(f)(x)
assert isinstance(out, OrderedDict)
expected = f(x)
self.assertEqual(out["sin"], expected["sin"])
self.assertEqual(out["cos"], expected["cos"])
def test_pytest_odict_flatten_unflatten(self):
from functorch._src.vmap import _odict_flatten, _odict_unflatten
x = torch.randn(2, 3)
inpt = OrderedDict([("sin", x.sin()), ("cos", x.cos())])
out = _odict_flatten(inpt)
self.assertEqual(out[0], list(inpt.values()))
self.assertEqual(out[1], list(inpt.keys()))
recon_inpt = _odict_unflatten(*out)
self.assertEqual(recon_inpt, inpt)
def test_pytree_returns_outdims(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, (0, 1)))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, x.sin())
self.assertEqual(y2, x.sin().t())
def test_pytree_returns_broadcast_simple(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=1)(x)
self.assertEqual(y0, x.sin().t())
self.assertEqual(y1, y0)
self.assertEqual(y2, y0)
def test_pytree_returns_broadcast_nested(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, 1))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, y0.t())
self.assertEqual(y2, y0.t())
def test_out_dims_must_be_int_or_collection_of_int_err_msg(self):
msg = 'must be an int or a python collection of ints'
tensor = torch.randn(2, 3)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims='lol')(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=('lol',))(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=None)(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(None,))(tensor)
def test_out_dims_and_num_outputs_mismatch_err_msg(self):
msg = 'not compatible'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0, 0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x), out_dims=(0,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0))(x)
def test_out_dim_out_of_bounds_err_msg(self):
msg = 'Dimension out of range'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=3)(x)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=-4)(x)
def test_non_zero_in_dims(self):
tensor = torch.randn(2, 3, 5)
output = vmap(lambda x: x, (1,))(tensor)
self.assertEqual(output, tensor.permute(1, 0, 2))
self.assertEqual(output.data_ptr(), tensor.data_ptr())
x = torch.randn(2, 3)
y = torch.randn(3, 2)
output = vmap(torch.mul, (0, 1))(x, y)
self.assertEqual(output, x * y.t())
output = vmap(torch.mul, (1, 0))(x, y)
self.assertEqual(output, x.t() * y)
def test_none_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (0, None))(x, y)
self.assertEqual(output.shape, (2, 2, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
output = vmap(torch.mul, (0, None))(x, 2)
self.assertEqual(output, x * 2)
def test_nested_non_default_in_dims(self):
x = torch.rand(5, 2, 3)
y = torch.rand(3, 5, 2)
result = vmap(vmap(vmap(torch.mul), (1, 0)), (1, 2))(x, y)
self.assertEqual(result, x.permute(1, 2, 0) * y.permute(2, 0, 1))
def test_nested_negative_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (-1, -1))(x, y)
self.assertEqual(output.shape, (3, 2))
self.assertEqual(output, (x * y).permute(1, 0))
def test_non_default_in_dims_out_dims(self):
x = torch.randn(2, 3, 5)
result = vmap(lambda x: x, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x)
self.assertEqual(result.data_ptr(), x.data_ptr())
result = vmap(lambda x: x, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, x.transpose(1, 2))
self.assertEqual(result.data_ptr(), x.data_ptr())
def foo(x):
return x * 2
result = vmap(foo, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x * 2)
result = vmap(foo, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, (x * 2).transpose(1, 2))
result = vmap(vmap(foo, 1, 1), 1, 1)(x)
self.assertEqual(result, x * 2)
def test_item_throws(self):
def f(x):
return x.item()
with self.assertRaisesRegex(RuntimeError, r'item\(\) on a Tensor'):
vmap(f)(torch.randn(3))
def test_data_dependent_control_flow_throws(self):
def f(x):
if x:
return x
return 0
with self.assertRaisesRegex(RuntimeError, r'data-dependent control flow'):
vmap(f)(torch.randn(3))
def test_accepts_nested_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
out = vmap(lambda z: z[0] + z[1])((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1])([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'])({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=(0,))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out_fn = vmap(lambda z: z['x'][0] + z['x'][1][0] + z['y'][0] + z['y'][1])
out = out_fn({'x': [x, (x,)], 'y': [y, y]})
self.assertEqual(out, x + x + y + y)
def test_in_dims_wrong_type_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'expected `in_dims` to be int or a \(potentially nested\) tuple'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, [0, 0])(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, set({0, 0}))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, 'lol')(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=[0, 0])([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_not_enough_in_dims_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'in_dims is not compatible with the structure of `inputs`'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0,))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0, 0, 0))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0],))([x, y])
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_integer_in_dim_but_not_tensor_input_err_msg(self):
def foo(xy):
return xy[0] * xy[1]
def bar(x, yz):
return x * yz[0] * yz[1]
x = torch.randn(2, 3)
msg = 'Got in_dim=0 for an input but the input is of type'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum)(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum, (0, 0))(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, 1])
vmap(torch.sum, (0, None))(x, 0)
def test_in_dim_not_in_tensor_err_msg(self):
def foo(x):
return x * x
x = torch.randn(2, 3)
y = torch.randn(2, 3)
msg = r'Got in_dim=-?\w for some input, but that input is a Tensor of dimensionality \w'
with self.assertRaisesRegex(ValueError, msg):
vmap(foo)(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(0,))(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(-3,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(2,))(y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([3, 0],))([x, y])
vmap(foo, in_dims=(0,))(torch.randn(2, 3))
vmap(foo, in_dims=(1,))(torch.randn(2, 3))
def test_fallback_does_not_warn_by_default(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
vmap(op)(x, y)
self.assertEqual(len(wa), 1)
@unittest.expectedFailure
def test_fallback_warns_when_warnings_are_enabled(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
with EnableVmapFallbackWarnings():
vmap(op)(x, y)
self.assertEqual(len(wa), 2)
self.assertRegex(str(wa[-1].message), FALLBACK_REGEX)
def _assert_uses_vmap_fallback(self, vmap_args, inputs):
return
def test_fallback_zero_dim(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0, B1 = 0, 3
x = torch.randn(B0, 11)
y = torch.randn(11)
msg = 'The fallback path does not support vmap over dims of size 0'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
x = torch.randn(B0, B1, 11)
y = torch.randn(B1, 11)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
def test_fallback_atan2(self):
op = torch.copysign
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(op, (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(100, 10, 10, 5)
y = torch.randn(100, 10, 10)
result = vmap(vmap(vmap(op)))(x, y)
self.assertEqual(result, op(x, y.view(100, 10, 10, 1)))
@unittest.skip
def test_fallback_masked_fill(self):
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 13)
self._assert_uses_vmap_fallback((torch.index_add, (0, None, None, 0)), (x, dim, index, values))
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(
x, dim + 1, index, values.view(B0, 3, 1, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
def test_fallback_multiple_returns(self):
B0, B1, B2 = 2, 3, 1237
tensor = torch.randn(B0, 10)
self._assert_uses_vmap_fallback((torch.var_mean,), (tensor,))
result = vmap(torch.var_mean)(tensor)
expected =
|
torch
|
torch.var_mean(tensor, dim=1)
|
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, 10)
|
torch
|
torch.var_mean
|
torch_direct_api
|
v_1_10_0
|
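A minimal sketch of torch.var_mean, the target API in the row above; it returns the variance and the mean in a single call. The tensor sizes are assumptions.

import torch

t = torch.randn(2, 10)
var, mean = torch.var_mean(t, dim=1)   # reduce over dim 1: both outputs have shape (2,)
assert var.shape == mean.shape == (2,)
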
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapAPI(TestCase):
def test_non_tensor_output_raises(self):
with self.assertRaisesRegex(ValueError, "got type <class 'float'> as a return"):
vmap(lambda x: 3.14)(torch.ones(3))
def multiple_outputs(x):
return x, 3
with self.assertRaisesRegex(ValueError, "got type <class 'int'> as a return"):
vmap(multiple_outputs)(torch.ones(3))
def test_different_map_dim_size_raises(self):
x = torch.randn(2)
y = torch.randn(3)
expected_msg = 'Expected all tensors to have the same size in the mapped dimension'
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(torch.mul)(x, y)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
def test_func_with_no_inputs(self):
expected_msg = 'got no inputs'
def foo():
return torch.randn(3)
def bar(x):
return torch.randn(3)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(foo)()
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(bar)()
def test_constant_function(self):
output = vmap(lambda x: torch.tensor(3.14))(torch.ones(3))
self.assertEqual(output, torch.tensor([3.14, 3.14, 3.14]))
def test_single_input(self):
x = torch.randn(2, 3)
def square(x):
return x * x
output = vmap(square)(x)
self.assertEqual(output, x * x)
def test_multiple_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul)(x, y)
self.assertEqual(output, x * y)
def test_multiple_outputs(self):
def foo(x):
return x * x, x * x * x
x = torch.randn(3)
outputs = vmap(foo)(x)
self.assertEqual(outputs[0], x * x)
self.assertEqual(outputs[1], x * x * x)
def test_multiple_outputs2(self):
def returns_tuple_of_tensors(x):
return (x, x)
def returns_list_of_two_tensors(x):
return [x, x]
def returns_list_of_one_tensor(x):
return [x]
x = torch.randn(3)
vmap(returns_tuple_of_tensors)(x)
vmap(returns_list_of_two_tensors)(x)
vmap(returns_list_of_one_tensor)(x)
def test_nested_with_same_map_dim(self):
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
output = vmap(vmap(torch.mul))(x, y)
self.assertEqual(output, x * y)
output = vmap(vmap(vmap(torch.mul)))(x, y)
self.assertEqual(output, x * y)
def test_nested_with_diag_embed(self):
x = torch.randn(3, 3, 5)
output = vmap(vmap(torch.diag_embed))(x)
self.assertEqual(output, torch.diag_embed(x))
def test_nested_with_different_map_dim(self):
x = torch.randn(2, 3)
y = torch.randn(5, 3)
output = vmap(lambda x: vmap(lambda y: x * y)(y))(x)
self.assertEqual(output.shape, (2, 5, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
z = torch.randn(7, 3)
output = vmap(lambda x: vmap(lambda y: vmap(lambda z: x * y * z)(z))(y))(x)
self.assertEqual(output.shape, (2, 5, 7, 3))
self.assertEqual(output, x.view(2, 1, 1, 3) * y.view(5, 1, 3) * z)
def test_noop_in_inner_vmap(self):
x = torch.randn(3)
y = torch.randn(5)
output = vmap(lambda x: vmap(lambda y: x)(y))(x)
self.assertEqual(output, x.view(3, 1).expand(3, 5))
def test_unsupported_op_err_msg(self):
tensor = torch.randn(2, 3)
msg = (
r"Batching rule not implemented for aten::.+; the "
r"fallback path doesn't work on out= or view ops"
)
def out_op(x, y):
return torch.abs(x, out=y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(out_op)(tensor, tensor)
with self.assertRaisesRegex(RuntimeError, 'Batching rule not implemented'):
vmap(torch.equal)(tensor, tensor)
def test_nonzero_out_dims(self):
tensor = torch.randn(2, 3)
result = vmap(lambda x: x, out_dims=1)(tensor)
self.assertEqual(result, tensor.permute(1, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 0, 3))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=-1)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 3, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
other = torch.randn(2, 3, 5, 7)
result = vmap(lambda x, y: (x, y), out_dims=2)(tensor, other)
self.assertEqual(result, (tensor.permute(1, 2, 0, 3), other.permute(1, 2, 0, 3)))
ndims = 64
shape = [2] + [1] * (ndims - 1)
expected_shape = [1, 1, 2] + [1] * (ndims - 3)
tensor = torch.randn(shape)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result.shape, expected_shape)
def foo(x, y):
return x, x * y, x * y * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=1)(x, y)
self.assertEqual(
result,
(x.permute(1, 0, 2), (x * y).permute(1, 0, 2), (x * y * y).permute(1, 0, 2)))
def test_multiple_out_dims(self):
def foo(x):
return x, x
def bar(x, y):
return x, x, x, x * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=(0, 1))(x)
self.assertEqual(result, (x, x.permute(1, 0, 2)))
result = vmap(bar, out_dims=(-1, 0, 1, 2))(x, y)
expected = (
x.permute(1, 2, 0),
x,
x.permute(1, 0, 2),
(x * y).permute(1, 2, 0),
)
self.assertEqual(result, expected)
def test_nested_out_dims(self):
y = torch.randn(2, 3, 5, 7)
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y))(y)
self.assertEqual(result.shape, (2, 5, 3, 7))
self.assertEqual(result, y.permute(0, 2, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y), out_dims=1)(y)
self.assertEqual(result.shape, (5, 2, 3, 7))
self.assertEqual(result, y.permute(2, 0, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=-1)(y), out_dims=-1)(y)
self.assertEqual(result.shape, (5, 7, 3, 2))
self.assertEqual(result, y.permute(2, 3, 1, 0))
x = torch.randn(2, 3)
y = torch.randn(5, 3)
result = vmap(lambda y: vmap(lambda x: x * y, out_dims=1)(x), out_dims=-1)(y)
self.assertEqual(result.shape, (3, 2, 5))
self.assertEqual(result, (y.view(5, 1, 3) * x).permute(2, 1, 0))
def test_out_dims_edge_case(self):
def foo(x):
return x
tensor = torch.randn(2, 3)
expected = vmap(foo, out_dims=1)(tensor)
result = vmap(foo, out_dims=(1,))(tensor)
self.assertEqual(result, expected)
def test_pytree_returns(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y), [y, (y, y)]
y0, (y1, y2), (y3, (y4, y5)) = vmap(f)(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y0, y1)
self.assertEqual(y2, y1)
self.assertEqual(y2, y3)
self.assertEqual(y4, y3)
self.assertEqual(y5, y4)
def test_pytree_odict_returns(self):
x = torch.randn(2, 3)
def f(t):
y = t.sin()
return OrderedDict([("sin", y), ("cos", t.cos())])
out = vmap(f)(x)
assert isinstance(out, OrderedDict)
expected = f(x)
self.assertEqual(out["sin"], expected["sin"])
self.assertEqual(out["cos"], expected["cos"])
def test_pytest_odict_flatten_unflatten(self):
from functorch._src.vmap import _odict_flatten, _odict_unflatten
x = torch.randn(2, 3)
inpt = OrderedDict([("sin", x.sin()), ("cos", x.cos())])
out = _odict_flatten(inpt)
self.assertEqual(out[0], list(inpt.values()))
self.assertEqual(out[1], list(inpt.keys()))
recon_inpt = _odict_unflatten(*out)
self.assertEqual(recon_inpt, inpt)
def test_pytree_returns_outdims(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, (0, 1)))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, x.sin())
self.assertEqual(y2, x.sin().t())
def test_pytree_returns_broadcast_simple(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=1)(x)
self.assertEqual(y0, x.sin().t())
self.assertEqual(y1, y0)
self.assertEqual(y2, y0)
def test_pytree_returns_broadcast_nested(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, 1))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, y0.t())
self.assertEqual(y2, y0.t())
def test_out_dims_must_be_int_or_collection_of_int_err_msg(self):
msg = 'must be an int or a python collection of ints'
tensor = torch.randn(2, 3)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims='lol')(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=('lol',))(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=None)(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(None,))(tensor)
def test_out_dims_and_num_outputs_mismatch_err_msg(self):
msg = 'not compatible'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0, 0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x), out_dims=(0,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0))(x)
def test_out_dim_out_of_bounds_err_msg(self):
msg = 'Dimension out of range'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=3)(x)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=-4)(x)
def test_non_zero_in_dims(self):
tensor = torch.randn(2, 3, 5)
output = vmap(lambda x: x, (1,))(tensor)
self.assertEqual(output, tensor.permute(1, 0, 2))
self.assertEqual(output.data_ptr(), tensor.data_ptr())
x = torch.randn(2, 3)
y = torch.randn(3, 2)
output = vmap(torch.mul, (0, 1))(x, y)
self.assertEqual(output, x * y.t())
output = vmap(torch.mul, (1, 0))(x, y)
self.assertEqual(output, x.t() * y)
def test_none_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (0, None))(x, y)
self.assertEqual(output.shape, (2, 2, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
output = vmap(torch.mul, (0, None))(x, 2)
self.assertEqual(output, x * 2)
def test_nested_non_default_in_dims(self):
x = torch.rand(5, 2, 3)
y = torch.rand(3, 5, 2)
result = vmap(vmap(vmap(torch.mul), (1, 0)), (1, 2))(x, y)
self.assertEqual(result, x.permute(1, 2, 0) * y.permute(2, 0, 1))
def test_nested_negative_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (-1, -1))(x, y)
self.assertEqual(output.shape, (3, 2))
self.assertEqual(output, (x * y).permute(1, 0))
def test_non_default_in_dims_out_dims(self):
x = torch.randn(2, 3, 5)
result = vmap(lambda x: x, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x)
self.assertEqual(result.data_ptr(), x.data_ptr())
result = vmap(lambda x: x, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, x.transpose(1, 2))
self.assertEqual(result.data_ptr(), x.data_ptr())
def foo(x):
return x * 2
result = vmap(foo, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x * 2)
result = vmap(foo, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, (x * 2).transpose(1, 2))
result = vmap(vmap(foo, 1, 1), 1, 1)(x)
self.assertEqual(result, x * 2)
def test_item_throws(self):
def f(x):
return x.item()
with self.assertRaisesRegex(RuntimeError, r'item\(\) on a Tensor'):
vmap(f)(torch.randn(3))
def test_data_dependent_control_flow_throws(self):
def f(x):
if x:
return x
return 0
with self.assertRaisesRegex(RuntimeError, r'data-dependent control flow'):
vmap(f)(torch.randn(3))
def test_accepts_nested_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
out = vmap(lambda z: z[0] + z[1])((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1])([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'])({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=(0,))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out_fn = vmap(lambda z: z['x'][0] + z['x'][1][0] + z['y'][0] + z['y'][1])
out = out_fn({'x': [x, (x,)], 'y': [y, y]})
self.assertEqual(out, x + x + y + y)
def test_in_dims_wrong_type_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'expected `in_dims` to be int or a \(potentially nested\) tuple'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, [0, 0])(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, set({0, 0}))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, 'lol')(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=[0, 0])([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_not_enough_in_dims_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'in_dims is not compatible with the structure of `inputs`'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0,))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0, 0, 0))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0],))([x, y])
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_integer_in_dim_but_not_tensor_input_err_msg(self):
def foo(xy):
return xy[0] * xy[1]
def bar(x, yz):
return x * yz[0] * yz[1]
x = torch.randn(2, 3)
msg = 'Got in_dim=0 for an input but the input is of type'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum)(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum, (0, 0))(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, 1])
vmap(torch.sum, (0, None))(x, 0)
def test_in_dim_not_in_tensor_err_msg(self):
def foo(x):
return x * x
x = torch.randn(2, 3)
y = torch.randn(2, 3)
msg = r'Got in_dim=-?\w for some input, but that input is a Tensor of dimensionality \w'
with self.assertRaisesRegex(ValueError, msg):
vmap(foo)(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(0,))(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(-3,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(2,))(y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([3, 0],))([x, y])
vmap(foo, in_dims=(0,))(torch.randn(2, 3))
vmap(foo, in_dims=(1,))(torch.randn(2, 3))
def test_fallback_does_not_warn_by_default(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
vmap(op)(x, y)
self.assertEqual(len(wa), 1)
@unittest.expectedFailure
def test_fallback_warns_when_warnings_are_enabled(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
with EnableVmapFallbackWarnings():
vmap(op)(x, y)
self.assertEqual(len(wa), 2)
self.assertRegex(str(wa[-1].message), FALLBACK_REGEX)
def _assert_uses_vmap_fallback(self, vmap_args, inputs):
return
def test_fallback_zero_dim(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0, B1 = 0, 3
x = torch.randn(B0, 11)
y = torch.randn(11)
msg = 'The fallback path does not support vmap over dims of size 0'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
x = torch.randn(B0, B1, 11)
y = torch.randn(B1, 11)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
def test_fallback_atan2(self):
op = torch.copysign
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(op, (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(100, 10, 10, 5)
y = torch.randn(100, 10, 10)
result = vmap(vmap(vmap(op)))(x, y)
self.assertEqual(result, op(x, y.view(100, 10, 10, 1)))
@unittest.skip
def test_fallback_masked_fill(self):
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 13)
self._assert_uses_vmap_fallback((torch.index_add, (0, None, None, 0)), (x, dim, index, values))
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(
x, dim + 1, index, values.view(B0, 3, 1, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
def test_fallback_multiple_returns(self):
B0, B1, B2 = 2, 3, 1237
tensor = torch.randn(B0, 10)
self._assert_uses_vmap_fallback((torch.var_mean,), (tensor,))
result = vmap(torch.var_mean)(tensor)
expected = torch.var_mean(tensor, dim=1)
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, 10)
result = vmap(vmap(torch.var_mean))(tensor)
expected = torch.var_mean(tensor, dim=2)
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, B2, 10)
result = vmap(vmap(vmap(torch.var_mean)))(tensor)
expected = torch.var_mean(tensor, dim=3)
self.assertEqual(result, expected)
def test_inplace_fallback_unary(self):
op = Tensor.acos_
B0, B1, B2 = 2, 3, 10000
x = torch.randn(B0, 5)
self._assert_uses_vmap_fallback((op,), (x,))
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op)(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op, out_dims=(1,))(x)
self.assertTrue(result._base is x)
self.assertEqual(result, x_orig.t().acos())
x_orig = torch.randn(B0, B1, 5)
x = x_orig.clone()
result = vmap(vmap(op))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
result = vmap(vmap(vmap(op)))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
def test_inplace_fallback_nary_same_levels(self):
op = Tensor.atan2_
outplace_op = torch.atan2
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0 = 5
x_orig = torch.randn(7, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, 7, 11)
vmap(op, (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim(0, 2)))
B0, B1 = 5, 7
x_orig = torch.randn(B1, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, B1, 11)
vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim([0, 1], [2, 0])))
B0, B1, B2 = 100, 10, 10
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
y = torch.randn(B0, B1, B2)
vmap(vmap(vmap(op)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, B1, B2, 1)))
@unittest.expectedFailure
def test_inplace_fallback_nary_different_levels(self):
op = Tensor.atan2_
outplace_op = torch.atan2
B0, B1 = 2, 3
x = torch.rand(B0, 7)
y = torch.rand(7)
self._assert_uses_vmap_fallback((op, (0, None)), (x, y))
x_orig = torch.rand(B0, 7)
x = x_orig.clone()
y = torch.rand(7)
vmap(op, in_dims=(0, None))(x, y)
self.assertEqual(x, outplace_op(x_orig, y))
x_orig = torch.rand(B0, B1, 7)
x = x_orig.clone()
y = torch.rand(B0, 7)
vmap(vmap(op, in_dims=(0, None)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, 1, 7)))
msg = r'vmap: aten::atan2_\(self, \*extra_args\) is not possible'
x = torch.rand(7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(7, B0)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 1))(x, y)
x = torch.rand(B0, 7)
y = torch.rand(B0, B1, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(None, 0)))(x, y)
def test_backward_unsupported_interaction(self):
x = torch.randn(3, requires_grad=True)
y = torch.randn(5)
grad =
|
torch
|
torch.randn_like(x)
|
err_msg = r'backward\(\) called inside a functorch transform'
def backward_on_vmapped_tensor(x):
x.sum().backward()
|
torch
|
torch.randn_like
|
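A minimal sketch of the target API above, torch.randn_like, with arbitrary values (illustration only, separate from the row data): it fills a fresh tensor with standard-normal samples while copying the shape, dtype, and device of its argument.
import torch
x = torch.randn(3, requires_grad=True)
grad = torch.randn_like(x)  # same shape/dtype/device as x; requires_grad defaults to False
assert grad.shape == x.shape and grad.dtype == x.dtype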
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapAPI(TestCase):
def test_non_tensor_output_raises(self):
with self.assertRaisesRegex(ValueError, "got type <class 'float'> as a return"):
vmap(lambda x: 3.14)(torch.ones(3))
def multiple_outputs(x):
return x, 3
with self.assertRaisesRegex(ValueError, "got type <class 'int'> as a return"):
vmap(multiple_outputs)(torch.ones(3))
def test_different_map_dim_size_raises(self):
x = torch.randn(2)
y = torch.randn(3)
expected_msg = 'Expected all tensors to have the same size in the mapped dimension'
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(torch.mul)(x, y)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
def test_func_with_no_inputs(self):
expected_msg = 'got no inputs'
def foo():
return torch.randn(3)
def bar(x):
return torch.randn(3)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(foo)()
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(bar)()
def test_constant_function(self):
output = vmap(lambda x: torch.tensor(3.14))(torch.ones(3))
self.assertEqual(output, torch.tensor([3.14, 3.14, 3.14]))
def test_single_input(self):
x = torch.randn(2, 3)
def square(x):
return x * x
output = vmap(square)(x)
self.assertEqual(output, x * x)
def test_multiple_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul)(x, y)
self.assertEqual(output, x * y)
def test_multiple_outputs(self):
def foo(x):
return x * x, x * x * x
x = torch.randn(3)
outputs = vmap(foo)(x)
self.assertEqual(outputs[0], x * x)
self.assertEqual(outputs[1], x * x * x)
def test_multiple_outputs2(self):
def returns_tuple_of_tensors(x):
return (x, x)
def returns_list_of_two_tensors(x):
return [x, x]
def returns_list_of_one_tensor(x):
return [x]
x = torch.randn(3)
vmap(returns_tuple_of_tensors)(x)
vmap(returns_list_of_two_tensors)(x)
vmap(returns_list_of_one_tensor)(x)
def test_nested_with_same_map_dim(self):
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
output = vmap(vmap(torch.mul))(x, y)
self.assertEqual(output, x * y)
output = vmap(vmap(vmap(torch.mul)))(x, y)
self.assertEqual(output, x * y)
def test_nested_with_diag_embed(self):
x = torch.randn(3, 3, 5)
output = vmap(vmap(torch.diag_embed))(x)
self.assertEqual(output, torch.diag_embed(x))
def test_nested_with_different_map_dim(self):
x = torch.randn(2, 3)
y = torch.randn(5, 3)
output = vmap(lambda x: vmap(lambda y: x * y)(y))(x)
self.assertEqual(output.shape, (2, 5, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
z = torch.randn(7, 3)
output = vmap(lambda x: vmap(lambda y: vmap(lambda z: x * y * z)(z))(y))(x)
self.assertEqual(output.shape, (2, 5, 7, 3))
self.assertEqual(output, x.view(2, 1, 1, 3) * y.view(5, 1, 3) * z)
def test_noop_in_inner_vmap(self):
x = torch.randn(3)
y = torch.randn(5)
output = vmap(lambda x: vmap(lambda y: x)(y))(x)
self.assertEqual(output, x.view(3, 1).expand(3, 5))
def test_unsupported_op_err_msg(self):
tensor = torch.randn(2, 3)
msg = (
r"Batching rule not implemented for aten::.+; the "
r"fallback path doesn't work on out= or view ops"
)
def out_op(x, y):
return torch.abs(x, out=y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(out_op)(tensor, tensor)
with self.assertRaisesRegex(RuntimeError, 'Batching rule not implemented'):
vmap(torch.equal)(tensor, tensor)
def test_nonzero_out_dims(self):
tensor = torch.randn(2, 3)
result = vmap(lambda x: x, out_dims=1)(tensor)
self.assertEqual(result, tensor.permute(1, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 0, 3))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=-1)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 3, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
other = torch.randn(2, 3, 5, 7)
result = vmap(lambda x, y: (x, y), out_dims=2)(tensor, other)
self.assertEqual(result, (tensor.permute(1, 2, 0, 3), other.permute(1, 2, 0, 3)))
ndims = 64
shape = [2] + [1] * (ndims - 1)
expected_shape = [1, 1, 2] + [1] * (ndims - 3)
tensor = torch.randn(shape)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result.shape, expected_shape)
def foo(x, y):
return x, x * y, x * y * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=1)(x, y)
self.assertEqual(
result,
(x.permute(1, 0, 2), (x * y).permute(1, 0, 2), (x * y * y).permute(1, 0, 2)))
def test_multiple_out_dims(self):
def foo(x):
return x, x
def bar(x, y):
return x, x, x, x * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=(0, 1))(x)
self.assertEqual(result, (x, x.permute(1, 0, 2)))
result = vmap(bar, out_dims=(-1, 0, 1, 2))(x, y)
expected = (
x.permute(1, 2, 0),
x,
x.permute(1, 0, 2),
(x * y).permute(1, 2, 0),
)
self.assertEqual(result, expected)
def test_nested_out_dims(self):
y = torch.randn(2, 3, 5, 7)
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y))(y)
self.assertEqual(result.shape, (2, 5, 3, 7))
self.assertEqual(result, y.permute(0, 2, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y), out_dims=1)(y)
self.assertEqual(result.shape, (5, 2, 3, 7))
self.assertEqual(result, y.permute(2, 0, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=-1)(y), out_dims=-1)(y)
self.assertEqual(result.shape, (5, 7, 3, 2))
self.assertEqual(result, y.permute(2, 3, 1, 0))
x = torch.randn(2, 3)
y = torch.randn(5, 3)
result = vmap(lambda y: vmap(lambda x: x * y, out_dims=1)(x), out_dims=-1)(y)
self.assertEqual(result.shape, (3, 2, 5))
self.assertEqual(result, (y.view(5, 1, 3) * x).permute(2, 1, 0))
def test_out_dims_edge_case(self):
def foo(x):
return x
tensor = torch.randn(2, 3)
expected = vmap(foo, out_dims=1)(tensor)
result = vmap(foo, out_dims=(1,))(tensor)
self.assertEqual(result, expected)
def test_pytree_returns(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y), [y, (y, y)]
y0, (y1, y2), (y3, (y4, y5)) = vmap(f)(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y0, y1)
self.assertEqual(y2, y1)
self.assertEqual(y2, y3)
self.assertEqual(y4, y3)
self.assertEqual(y5, y4)
def test_pytree_odict_returns(self):
x = torch.randn(2, 3)
def f(t):
y = t.sin()
return OrderedDict([("sin", y), ("cos", t.cos())])
out = vmap(f)(x)
assert isinstance(out, OrderedDict)
expected = f(x)
self.assertEqual(out["sin"], expected["sin"])
self.assertEqual(out["cos"], expected["cos"])
def test_pytest_odict_flatten_unflatten(self):
from functorch._src.vmap import _odict_flatten, _odict_unflatten
x = torch.randn(2, 3)
inpt = OrderedDict([("sin", x.sin()), ("cos", x.cos())])
out = _odict_flatten(inpt)
self.assertEqual(out[0], list(inpt.values()))
self.assertEqual(out[1], list(inpt.keys()))
recon_inpt = _odict_unflatten(*out)
self.assertEqual(recon_inpt, inpt)
def test_pytree_returns_outdims(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, (0, 1)))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, x.sin())
self.assertEqual(y2, x.sin().t())
def test_pytree_returns_broadcast_simple(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=1)(x)
self.assertEqual(y0, x.sin().t())
self.assertEqual(y1, y0)
self.assertEqual(y2, y0)
def test_pytree_returns_broadcast_nested(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, 1))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, y0.t())
self.assertEqual(y2, y0.t())
def test_out_dims_must_be_int_or_collection_of_int_err_msg(self):
msg = 'must be an int or a python collection of ints'
tensor = torch.randn(2, 3)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims='lol')(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=('lol',))(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=None)(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(None,))(tensor)
def test_out_dims_and_num_outputs_mismatch_err_msg(self):
msg = 'not compatible'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0, 0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x), out_dims=(0,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0))(x)
def test_out_dim_out_of_bounds_err_msg(self):
msg = 'Dimension out of range'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=3)(x)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=-4)(x)
def test_non_zero_in_dims(self):
tensor = torch.randn(2, 3, 5)
output = vmap(lambda x: x, (1,))(tensor)
self.assertEqual(output, tensor.permute(1, 0, 2))
self.assertEqual(output.data_ptr(), tensor.data_ptr())
x = torch.randn(2, 3)
y = torch.randn(3, 2)
output = vmap(torch.mul, (0, 1))(x, y)
self.assertEqual(output, x * y.t())
output = vmap(torch.mul, (1, 0))(x, y)
self.assertEqual(output, x.t() * y)
def test_none_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (0, None))(x, y)
self.assertEqual(output.shape, (2, 2, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
output = vmap(torch.mul, (0, None))(x, 2)
self.assertEqual(output, x * 2)
def test_nested_non_default_in_dims(self):
x = torch.rand(5, 2, 3)
y = torch.rand(3, 5, 2)
result = vmap(vmap(vmap(torch.mul), (1, 0)), (1, 2))(x, y)
self.assertEqual(result, x.permute(1, 2, 0) * y.permute(2, 0, 1))
def test_nested_negative_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (-1, -1))(x, y)
self.assertEqual(output.shape, (3, 2))
self.assertEqual(output, (x * y).permute(1, 0))
def test_non_default_in_dims_out_dims(self):
x = torch.randn(2, 3, 5)
result = vmap(lambda x: x, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x)
self.assertEqual(result.data_ptr(), x.data_ptr())
result = vmap(lambda x: x, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, x.transpose(1, 2))
self.assertEqual(result.data_ptr(), x.data_ptr())
def foo(x):
return x * 2
result = vmap(foo, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x * 2)
result = vmap(foo, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, (x * 2).transpose(1, 2))
result = vmap(vmap(foo, 1, 1), 1, 1)(x)
self.assertEqual(result, x * 2)
def test_item_throws(self):
def f(x):
return x.item()
with self.assertRaisesRegex(RuntimeError, r'item\(\) on a Tensor'):
vmap(f)(torch.randn(3))
def test_data_dependent_control_flow_throws(self):
def f(x):
if x:
return x
return 0
with self.assertRaisesRegex(RuntimeError, r'data-dependent control flow'):
vmap(f)(torch.randn(3))
def test_accepts_nested_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
out = vmap(lambda z: z[0] + z[1])((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1])([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'])({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=(0,))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out_fn = vmap(lambda z: z['x'][0] + z['x'][1][0] + z['y'][0] + z['y'][1])
out = out_fn({'x': [x, (x,)], 'y': [y, y]})
self.assertEqual(out, x + x + y + y)
def test_in_dims_wrong_type_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'expected `in_dims` to be int or a \(potentially nested\) tuple'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, [0, 0])(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, set({0, 0}))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, 'lol')(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=[0, 0])([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_not_enough_in_dims_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'in_dims is not compatible with the structure of `inputs`'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0,))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0, 0, 0))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0],))([x, y])
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_integer_in_dim_but_not_tensor_input_err_msg(self):
def foo(xy):
return xy[0] * xy[1]
def bar(x, yz):
return x * yz[0] * yz[1]
x = torch.randn(2, 3)
msg = 'Got in_dim=0 for an input but the input is of type'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum)(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum, (0, 0))(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, 1])
vmap(torch.sum, (0, None))(x, 0)
def test_in_dim_not_in_tensor_err_msg(self):
def foo(x):
return x * x
x = torch.randn(2, 3)
y = torch.randn(2, 3)
msg = r'Got in_dim=-?\w for some input, but that input is a Tensor of dimensionality \w'
with self.assertRaisesRegex(ValueError, msg):
vmap(foo)(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(0,))(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(-3,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(2,))(y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([3, 0],))([x, y])
vmap(foo, in_dims=(0,))(torch.randn(2, 3))
vmap(foo, in_dims=(1,))(torch.randn(2, 3))
def test_fallback_does_not_warn_by_default(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
vmap(op)(x, y)
self.assertEqual(len(wa), 1)
@unittest.expectedFailure
def test_fallback_warns_when_warnings_are_enabled(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
with EnableVmapFallbackWarnings():
vmap(op)(x, y)
self.assertEqual(len(wa), 2)
self.assertRegex(str(wa[-1].message), FALLBACK_REGEX)
def _assert_uses_vmap_fallback(self, vmap_args, inputs):
return
def test_fallback_zero_dim(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0, B1 = 0, 3
x = torch.randn(B0, 11)
y = torch.randn(11)
msg = 'The fallback path does not support vmap over dims of size 0'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
x = torch.randn(B0, B1, 11)
y = torch.randn(B1, 11)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
def test_fallback_atan2(self):
op = torch.copysign
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(op, (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(100, 10, 10, 5)
y = torch.randn(100, 10, 10)
result = vmap(vmap(vmap(op)))(x, y)
self.assertEqual(result, op(x, y.view(100, 10, 10, 1)))
@unittest.skip
def test_fallback_masked_fill(self):
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 13)
self._assert_uses_vmap_fallback((torch.index_add, (0, None, None, 0)), (x, dim, index, values))
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(
x, dim + 1, index, values.view(B0, 3, 1, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
def test_fallback_multiple_returns(self):
B0, B1, B2 = 2, 3, 1237
tensor = torch.randn(B0, 10)
self._assert_uses_vmap_fallback((torch.var_mean,), (tensor,))
result = vmap(torch.var_mean)(tensor)
expected = torch.var_mean(tensor, dim=1)
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, 10)
result = vmap(vmap(torch.var_mean))(tensor)
expected = torch.var_mean(tensor, dim=2)
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, B2, 10)
result = vmap(vmap(vmap(torch.var_mean)))(tensor)
expected = torch.var_mean(tensor, dim=3)
self.assertEqual(result, expected)
def test_inplace_fallback_unary(self):
op = Tensor.acos_
B0, B1, B2 = 2, 3, 10000
x = torch.randn(B0, 5)
self._assert_uses_vmap_fallback((op,), (x,))
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op)(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op, out_dims=(1,))(x)
self.assertTrue(result._base is x)
self.assertEqual(result, x_orig.t().acos())
x_orig = torch.randn(B0, B1, 5)
x = x_orig.clone()
result = vmap(vmap(op))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
result = vmap(vmap(vmap(op)))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
def test_inplace_fallback_nary_same_levels(self):
op = Tensor.atan2_
outplace_op = torch.atan2
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0 = 5
x_orig = torch.randn(7, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, 7, 11)
vmap(op, (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim(0, 2)))
B0, B1 = 5, 7
x_orig = torch.randn(B1, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, B1, 11)
vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim([0, 1], [2, 0])))
B0, B1, B2 = 100, 10, 10
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
y = torch.randn(B0, B1, B2)
vmap(vmap(vmap(op)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, B1, B2, 1)))
@unittest.expectedFailure
def test_inplace_fallback_nary_different_levels(self):
op = Tensor.atan2_
outplace_op = torch.atan2
B0, B1 = 2, 3
x = torch.rand(B0, 7)
y = torch.rand(7)
self._assert_uses_vmap_fallback((op, (0, None)), (x, y))
x_orig = torch.rand(B0, 7)
x = x_orig.clone()
y = torch.rand(7)
vmap(op, in_dims=(0, None))(x, y)
self.assertEqual(x, outplace_op(x_orig, y))
x_orig = torch.rand(B0, B1, 7)
x = x_orig.clone()
y = torch.rand(B0, 7)
vmap(vmap(op, in_dims=(0, None)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, 1, 7)))
msg = r'vmap: aten::atan2_\(self, \*extra_args\) is not possible'
x = torch.rand(7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(7, B0)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 1))(x, y)
x = torch.rand(B0, 7)
y = torch.rand(B0, B1, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(None, 0)))(x, y)
def test_backward_unsupported_interaction(self):
x = torch.randn(3, requires_grad=True)
y = torch.randn(5)
grad = torch.randn_like(x)
err_msg = r'backward\(\) called inside a functorch transform'
def backward_on_vmapped_tensor(x):
x.sum().backward()
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_on_vmapped_tensor)(x)
def backward_with_vmapped_grad(x, grad):
x.backward(grad)
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_with_vmapped_grad)(x, grad)
def completely_unrelated_backward(y):
x.sum().backward()
return y
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(completely_unrelated_backward)(y)
@unittest.expectedFailure
def test_grad_unsupported_interaction(self):
input_tensor = torch.randn(3, requires_grad=True)
err_msg = 'autograd.grad.* called inside torch.vmap'
captured = torch.randn(3, requires_grad=True)
def output_to_grad_is_vmapped(input_tensor):
output = (captured * input_tensor).sum()
return torch.autograd.grad([output], [captured])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(output_to_grad_is_vmapped)(input_tensor)
output = (input_tensor ** 2).sum()
def input_to_grad_is_vmapped(input_tensor):
return torch.autograd.grad([output], [input_tensor])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(input_to_grad_is_vmapped)(input_tensor)
def test_batched_gradient_basic(self):
N = 3
x = torch.randn(N, requires_grad=True)
y = torch.randn(N)
def vjp_mul(v):
return torch.autograd.grad([x * y], [x], grad_outputs=[v])[0]
batched_v =
|
torch
|
torch.eye(N)
|
jacobian = vmap(vjp_mul)(batched_v)
self.assertEqual(jacobian, torch.diagflat(y))
def test_functools_partial(self):
|
torch
|
torch.eye
|
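As a side note on the target API above (illustration only, separate from the row data): torch.eye(N) supplies the N standard basis vectors whose vector-Jacobian products stack into a full Jacobian; for f(x) = x * y each vjp is simply v * y, so stacking the rows of the identity recovers torch.diagflat(y). A minimal sketch with arbitrary values:
import torch
N = 3
y = torch.randn(N)
basis = torch.eye(N)                        # N x N identity; one basis vector per row
rows = torch.stack([v * y for v in basis])  # vjp of x * y w.r.t. x is v * y
assert torch.allclose(rows, torch.diagflat(y))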
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapAPI(TestCase):
def test_non_tensor_output_raises(self):
with self.assertRaisesRegex(ValueError, "got type <class 'float'> as a return"):
vmap(lambda x: 3.14)(torch.ones(3))
def multiple_outputs(x):
return x, 3
with self.assertRaisesRegex(ValueError, "got type <class 'int'> as a return"):
vmap(multiple_outputs)(torch.ones(3))
def test_different_map_dim_size_raises(self):
x = torch.randn(2)
y = torch.randn(3)
expected_msg = 'Expected all tensors to have the same size in the mapped dimension'
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(torch.mul)(x, y)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
def test_func_with_no_inputs(self):
expected_msg = 'got no inputs'
def foo():
return torch.randn(3)
def bar(x):
return torch.randn(3)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(foo)()
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(bar)()
def test_constant_function(self):
output = vmap(lambda x: torch.tensor(3.14))(torch.ones(3))
self.assertEqual(output, torch.tensor([3.14, 3.14, 3.14]))
def test_single_input(self):
x = torch.randn(2, 3)
def square(x):
return x * x
output = vmap(square)(x)
self.assertEqual(output, x * x)
def test_multiple_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul)(x, y)
self.assertEqual(output, x * y)
def test_multiple_outputs(self):
def foo(x):
return x * x, x * x * x
x = torch.randn(3)
outputs = vmap(foo)(x)
self.assertEqual(outputs[0], x * x)
self.assertEqual(outputs[1], x * x * x)
def test_multiple_outputs2(self):
def returns_tuple_of_tensors(x):
return (x, x)
def returns_list_of_two_tensors(x):
return [x, x]
def returns_list_of_one_tensor(x):
return [x]
x = torch.randn(3)
vmap(returns_tuple_of_tensors)(x)
vmap(returns_list_of_two_tensors)(x)
vmap(returns_list_of_one_tensor)(x)
def test_nested_with_same_map_dim(self):
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
output = vmap(vmap(torch.mul))(x, y)
self.assertEqual(output, x * y)
output = vmap(vmap(vmap(torch.mul)))(x, y)
self.assertEqual(output, x * y)
def test_nested_with_diag_embed(self):
x = torch.randn(3, 3, 5)
output = vmap(vmap(torch.diag_embed))(x)
self.assertEqual(output, torch.diag_embed(x))
def test_nested_with_different_map_dim(self):
x = torch.randn(2, 3)
y = torch.randn(5, 3)
output = vmap(lambda x: vmap(lambda y: x * y)(y))(x)
self.assertEqual(output.shape, (2, 5, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
z = torch.randn(7, 3)
output = vmap(lambda x: vmap(lambda y: vmap(lambda z: x * y * z)(z))(y))(x)
self.assertEqual(output.shape, (2, 5, 7, 3))
self.assertEqual(output, x.view(2, 1, 1, 3) * y.view(5, 1, 3) * z)
def test_noop_in_inner_vmap(self):
x = torch.randn(3)
y = torch.randn(5)
output = vmap(lambda x: vmap(lambda y: x)(y))(x)
self.assertEqual(output, x.view(3, 1).expand(3, 5))
def test_unsupported_op_err_msg(self):
tensor = torch.randn(2, 3)
msg = (
r"Batching rule not implemented for aten::.+; the "
r"fallback path doesn't work on out= or view ops"
)
def out_op(x, y):
return torch.abs(x, out=y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(out_op)(tensor, tensor)
with self.assertRaisesRegex(RuntimeError, 'Batching rule not implemented'):
vmap(torch.equal)(tensor, tensor)
def test_nonzero_out_dims(self):
tensor = torch.randn(2, 3)
result = vmap(lambda x: x, out_dims=1)(tensor)
self.assertEqual(result, tensor.permute(1, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 0, 3))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=-1)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 3, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
other = torch.randn(2, 3, 5, 7)
result = vmap(lambda x, y: (x, y), out_dims=2)(tensor, other)
self.assertEqual(result, (tensor.permute(1, 2, 0, 3), other.permute(1, 2, 0, 3)))
ndims = 64
shape = [2] + [1] * (ndims - 1)
expected_shape = [1, 1, 2] + [1] * (ndims - 3)
tensor = torch.randn(shape)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result.shape, expected_shape)
def foo(x, y):
return x, x * y, x * y * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=1)(x, y)
self.assertEqual(
result,
(x.permute(1, 0, 2), (x * y).permute(1, 0, 2), (x * y * y).permute(1, 0, 2)))
def test_multiple_out_dims(self):
def foo(x):
return x, x
def bar(x, y):
return x, x, x, x * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=(0, 1))(x)
self.assertEqual(result, (x, x.permute(1, 0, 2)))
result = vmap(bar, out_dims=(-1, 0, 1, 2))(x, y)
expected = (
x.permute(1, 2, 0),
x,
x.permute(1, 0, 2),
(x * y).permute(1, 2, 0),
)
self.assertEqual(result, expected)
def test_nested_out_dims(self):
y = torch.randn(2, 3, 5, 7)
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y))(y)
self.assertEqual(result.shape, (2, 5, 3, 7))
self.assertEqual(result, y.permute(0, 2, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y), out_dims=1)(y)
self.assertEqual(result.shape, (5, 2, 3, 7))
self.assertEqual(result, y.permute(2, 0, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=-1)(y), out_dims=-1)(y)
self.assertEqual(result.shape, (5, 7, 3, 2))
self.assertEqual(result, y.permute(2, 3, 1, 0))
x = torch.randn(2, 3)
y = torch.randn(5, 3)
result = vmap(lambda y: vmap(lambda x: x * y, out_dims=1)(x), out_dims=-1)(y)
self.assertEqual(result.shape, (3, 2, 5))
self.assertEqual(result, (y.view(5, 1, 3) * x).permute(2, 1, 0))
def test_out_dims_edge_case(self):
def foo(x):
return x
tensor = torch.randn(2, 3)
expected = vmap(foo, out_dims=1)(tensor)
result = vmap(foo, out_dims=(1,))(tensor)
self.assertEqual(result, expected)
def test_pytree_returns(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y), [y, (y, y)]
y0, (y1, y2), (y3, (y4, y5)) = vmap(f)(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y0, y1)
self.assertEqual(y2, y1)
self.assertEqual(y2, y3)
self.assertEqual(y4, y3)
self.assertEqual(y5, y4)
def test_pytree_odict_returns(self):
x = torch.randn(2, 3)
def f(t):
y = t.sin()
return OrderedDict([("sin", y), ("cos", t.cos())])
out = vmap(f)(x)
assert isinstance(out, OrderedDict)
expected = f(x)
self.assertEqual(out["sin"], expected["sin"])
self.assertEqual(out["cos"], expected["cos"])
def test_pytest_odict_flatten_unflatten(self):
from functorch._src.vmap import _odict_flatten, _odict_unflatten
x = torch.randn(2, 3)
inpt = OrderedDict([("sin", x.sin()), ("cos", x.cos())])
out = _odict_flatten(inpt)
self.assertEqual(out[0], list(inpt.values()))
self.assertEqual(out[1], list(inpt.keys()))
recon_inpt = _odict_unflatten(*out)
self.assertEqual(recon_inpt, inpt)
def test_pytree_returns_outdims(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, (0, 1)))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, x.sin())
self.assertEqual(y2, x.sin().t())
def test_pytree_returns_broadcast_simple(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=1)(x)
self.assertEqual(y0, x.sin().t())
self.assertEqual(y1, y0)
self.assertEqual(y2, y0)
def test_pytree_returns_broadcast_nested(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, 1))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, y0.t())
self.assertEqual(y2, y0.t())
def test_out_dims_must_be_int_or_collection_of_int_err_msg(self):
msg = 'must be an int or a python collection of ints'
tensor = torch.randn(2, 3)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims='lol')(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=('lol',))(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=None)(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(None,))(tensor)
def test_out_dims_and_num_outputs_mismatch_err_msg(self):
msg = 'not compatible'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0, 0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x), out_dims=(0,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0))(x)
def test_out_dim_out_of_bounds_err_msg(self):
msg = 'Dimension out of range'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=3)(x)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=-4)(x)
def test_non_zero_in_dims(self):
tensor = torch.randn(2, 3, 5)
output = vmap(lambda x: x, (1,))(tensor)
self.assertEqual(output, tensor.permute(1, 0, 2))
self.assertEqual(output.data_ptr(), tensor.data_ptr())
x = torch.randn(2, 3)
y = torch.randn(3, 2)
output = vmap(torch.mul, (0, 1))(x, y)
self.assertEqual(output, x * y.t())
output = vmap(torch.mul, (1, 0))(x, y)
self.assertEqual(output, x.t() * y)
def test_none_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (0, None))(x, y)
self.assertEqual(output.shape, (2, 2, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
output = vmap(torch.mul, (0, None))(x, 2)
self.assertEqual(output, x * 2)
def test_nested_non_default_in_dims(self):
x = torch.rand(5, 2, 3)
y = torch.rand(3, 5, 2)
result = vmap(vmap(vmap(torch.mul), (1, 0)), (1, 2))(x, y)
self.assertEqual(result, x.permute(1, 2, 0) * y.permute(2, 0, 1))
def test_nested_negative_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (-1, -1))(x, y)
self.assertEqual(output.shape, (3, 2))
self.assertEqual(output, (x * y).permute(1, 0))
def test_non_default_in_dims_out_dims(self):
x = torch.randn(2, 3, 5)
result = vmap(lambda x: x, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x)
self.assertEqual(result.data_ptr(), x.data_ptr())
result = vmap(lambda x: x, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, x.transpose(1, 2))
self.assertEqual(result.data_ptr(), x.data_ptr())
def foo(x):
return x * 2
result = vmap(foo, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x * 2)
result = vmap(foo, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, (x * 2).transpose(1, 2))
result = vmap(vmap(foo, 1, 1), 1, 1)(x)
self.assertEqual(result, x * 2)
def test_item_throws(self):
def f(x):
return x.item()
with self.assertRaisesRegex(RuntimeError, r'item\(\) on a Tensor'):
vmap(f)(torch.randn(3))
def test_data_dependent_control_flow_throws(self):
def f(x):
if x:
return x
return 0
with self.assertRaisesRegex(RuntimeError, r'data-dependent control flow'):
vmap(f)(torch.randn(3))
def test_accepts_nested_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
out = vmap(lambda z: z[0] + z[1])((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1])([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'])({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=(0,))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out_fn = vmap(lambda z: z['x'][0] + z['x'][1][0] + z['y'][0] + z['y'][1])
out = out_fn({'x': [x, (x,)], 'y': [y, y]})
self.assertEqual(out, x + x + y + y)
def test_in_dims_wrong_type_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'expected `in_dims` to be int or a \(potentially nested\) tuple'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, [0, 0])(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, set({0, 0}))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, 'lol')(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=[0, 0])([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_not_enough_in_dims_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'in_dims is not compatible with the structure of `inputs`'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0,))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0, 0, 0))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0],))([x, y])
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_integer_in_dim_but_not_tensor_input_err_msg(self):
def foo(xy):
return xy[0] * xy[1]
def bar(x, yz):
return x * yz[0] * yz[1]
x = torch.randn(2, 3)
msg = 'Got in_dim=0 for an input but the input is of type'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum)(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum, (0, 0))(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, 1])
vmap(torch.sum, (0, None))(x, 0)
def test_in_dim_not_in_tensor_err_msg(self):
def foo(x):
return x * x
x = torch.randn(2, 3)
y = torch.randn(2, 3)
msg = r'Got in_dim=-?\w for some input, but that input is a Tensor of dimensionality \w'
with self.assertRaisesRegex(ValueError, msg):
vmap(foo)(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(0,))(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(-3,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(2,))(y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([3, 0],))([x, y])
vmap(foo, in_dims=(0,))(torch.randn(2, 3))
vmap(foo, in_dims=(1,))(torch.randn(2, 3))
def test_fallback_does_not_warn_by_default(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
vmap(op)(x, y)
self.assertEqual(len(wa), 1)
@unittest.expectedFailure
def test_fallback_warns_when_warnings_are_enabled(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
with EnableVmapFallbackWarnings():
vmap(op)(x, y)
self.assertEqual(len(wa), 2)
self.assertRegex(str(wa[-1].message), FALLBACK_REGEX)
def _assert_uses_vmap_fallback(self, vmap_args, inputs):
return
def test_fallback_zero_dim(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0, B1 = 0, 3
x = torch.randn(B0, 11)
y = torch.randn(11)
msg = 'The fallback path does not support vmap over dims of size 0'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
x = torch.randn(B0, B1, 11)
y = torch.randn(B1, 11)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
def test_fallback_atan2(self):
op = torch.copysign
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(op, (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(100, 10, 10, 5)
y = torch.randn(100, 10, 10)
result = vmap(vmap(vmap(op)))(x, y)
self.assertEqual(result, op(x, y.view(100, 10, 10, 1)))
@unittest.skip
def test_fallback_masked_fill(self):
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 13)
self._assert_uses_vmap_fallback((torch.index_add, (0, None, None, 0)), (x, dim, index, values))
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(
x, dim + 1, index, values.view(B0, 3, 1, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
def test_fallback_multiple_returns(self):
B0, B1, B2 = 2, 3, 1237
tensor = torch.randn(B0, 10)
self._assert_uses_vmap_fallback((torch.var_mean,), (tensor,))
result = vmap(torch.var_mean)(tensor)
expected = torch.var_mean(tensor, dim=1)
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, 10)
result = vmap(vmap(torch.var_mean))(tensor)
expected = torch.var_mean(tensor, dim=2)
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, B2, 10)
result = vmap(vmap(vmap(torch.var_mean)))(tensor)
expected = torch.var_mean(tensor, dim=3)
self.assertEqual(result, expected)
def test_inplace_fallback_unary(self):
op = Tensor.acos_
B0, B1, B2 = 2, 3, 10000
x = torch.randn(B0, 5)
self._assert_uses_vmap_fallback((op,), (x,))
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op)(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op, out_dims=(1,))(x)
self.assertTrue(result._base is x)
self.assertEqual(result, x_orig.t().acos())
x_orig = torch.randn(B0, B1, 5)
x = x_orig.clone()
result = vmap(vmap(op))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
result = vmap(vmap(vmap(op)))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
def test_inplace_fallback_nary_same_levels(self):
op = Tensor.atan2_
outplace_op = torch.atan2
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0 = 5
x_orig = torch.randn(7, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, 7, 11)
vmap(op, (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim(0, 2)))
B0, B1 = 5, 7
x_orig = torch.randn(B1, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, B1, 11)
vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim([0, 1], [2, 0])))
B0, B1, B2 = 100, 10, 10
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
y = torch.randn(B0, B1, B2)
vmap(vmap(vmap(op)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, B1, B2, 1)))
@unittest.expectedFailure
def test_inplace_fallback_nary_different_levels(self):
op = Tensor.atan2_
outplace_op = torch.atan2
B0, B1 = 2, 3
x = torch.rand(B0, 7)
y = torch.rand(7)
self._assert_uses_vmap_fallback((op, (0, None)), (x, y))
x_orig = torch.rand(B0, 7)
x = x_orig.clone()
y = torch.rand(7)
vmap(op, in_dims=(0, None))(x, y)
self.assertEqual(x, outplace_op(x_orig, y))
x_orig = torch.rand(B0, B1, 7)
x = x_orig.clone()
y = torch.rand(B0, 7)
vmap(vmap(op, in_dims=(0, None)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, 1, 7)))
msg = r'vmap: aten::atan2_\(self, \*extra_args\) is not possible'
x = torch.rand(7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(7, B0)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 1))(x, y)
x = torch.rand(B0, 7)
y = torch.rand(B0, B1, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(None, 0)))(x, y)
def test_backward_unsupported_interaction(self):
x = torch.randn(3, requires_grad=True)
y = torch.randn(5)
grad = torch.randn_like(x)
err_msg = r'backward\(\) called inside a functorch transform'
def backward_on_vmapped_tensor(x):
x.sum().backward()
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_on_vmapped_tensor)(x)
def backward_with_vmapped_grad(x, grad):
x.backward(grad)
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_with_vmapped_grad)(x, grad)
def completely_unrelated_backward(y):
x.sum().backward()
return y
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(completely_unrelated_backward)(y)
@unittest.expectedFailure
def test_grad_unsupported_interaction(self):
input_tensor = torch.randn(3, requires_grad=True)
err_msg = 'autograd.grad.* called inside torch.vmap'
captured = torch.randn(3, requires_grad=True)
def output_to_grad_is_vmapped(input_tensor):
output = (captured * input_tensor).sum()
return torch.autograd.grad([output], [captured])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(output_to_grad_is_vmapped)(input_tensor)
output = (input_tensor ** 2).sum()
def input_to_grad_is_vmapped(input_tensor):
return torch.autograd.grad([output], [input_tensor])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(input_to_grad_is_vmapped)(input_tensor)
def test_batched_gradient_basic(self):
N = 3
x = torch.randn(N, requires_grad=True)
y = torch.randn(N)
def vjp_mul(v):
return torch.autograd.grad([x * y], [x], grad_outputs=[v])[0]
batched_v = torch.eye(N)
jacobian = vmap(vjp_mul)(batched_v)
self.assertEqual(jacobian, torch.diagflat(y))
def test_functools_partial(self):
x = torch.randn(3)
y = torch.randn(2, 3)
result = vmap(functools.partial(torch.mul, x))(y)
self.assertEqual(result, x * y)
def test_nn_module(self):
tensor = torch.randn(2, 3)
model = torch.nn.Linear(3, 3, bias=False)
result = vmap(model)(tensor)
self.assertEqual(result, model(tensor))
def test_fallback_with_undefined_grad(self):
B0 = 7
x = torch.randn(2, 3, 4, 5, requires_grad=True)
weight = torch.randn(3, 3, 1, 1)
v = torch.randn(B0, 2, 3, 4, 5)
def get_vjp(v):
result =
|
torch
|
torch.nn.functional.conv2d(x, weight)
|
grad_x, = torch.autograd.grad(result, x, v)
return grad_x
|
torch
|
torch.nn.functional.conv2d
|
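For orientation on the target API above, torch.nn.functional.conv2d (illustration only, separate from the row data): in the prompt it sits inside a vjp computation, so a minimal standalone sketch of that pattern follows; the shapes mirror the test, the values are arbitrary.
import torch
import torch.nn.functional as F
x = torch.randn(2, 3, 4, 5, requires_grad=True)
weight = torch.randn(3, 3, 1, 1)          # (out_channels, in_channels, kH, kW): a 1x1 conv
out = F.conv2d(x, weight)                 # shape (2, 3, 4, 5)
v = torch.randn_like(out)
grad_x, = torch.autograd.grad(out, x, v)  # vector-Jacobian product w.r.t. the input
assert grad_x.shape == x.shape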
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapAPI(TestCase):
def test_non_tensor_output_raises(self):
with self.assertRaisesRegex(ValueError, "got type <class 'float'> as a return"):
vmap(lambda x: 3.14)(torch.ones(3))
def multiple_outputs(x):
return x, 3
with self.assertRaisesRegex(ValueError, "got type <class 'int'> as a return"):
vmap(multiple_outputs)(torch.ones(3))
def test_different_map_dim_size_raises(self):
x = torch.randn(2)
y = torch.randn(3)
expected_msg = 'Expected all tensors to have the same size in the mapped dimension'
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(torch.mul)(x, y)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
def test_func_with_no_inputs(self):
expected_msg = 'got no inputs'
def foo():
return torch.randn(3)
def bar(x):
return torch.randn(3)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(foo)()
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(bar)()
def test_constant_function(self):
output = vmap(lambda x: torch.tensor(3.14))(torch.ones(3))
self.assertEqual(output, torch.tensor([3.14, 3.14, 3.14]))
def test_single_input(self):
x = torch.randn(2, 3)
def square(x):
return x * x
output = vmap(square)(x)
self.assertEqual(output, x * x)
def test_multiple_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul)(x, y)
self.assertEqual(output, x * y)
def test_multiple_outputs(self):
def foo(x):
return x * x, x * x * x
x = torch.randn(3)
outputs = vmap(foo)(x)
self.assertEqual(outputs[0], x * x)
self.assertEqual(outputs[1], x * x * x)
def test_multiple_outputs2(self):
def returns_tuple_of_tensors(x):
return (x, x)
def returns_list_of_two_tensors(x):
return [x, x]
def returns_list_of_one_tensor(x):
return [x]
x = torch.randn(3)
vmap(returns_tuple_of_tensors)(x)
vmap(returns_list_of_two_tensors)(x)
vmap(returns_list_of_one_tensor)(x)
def test_nested_with_same_map_dim(self):
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
output = vmap(vmap(torch.mul))(x, y)
self.assertEqual(output, x * y)
output = vmap(vmap(vmap(torch.mul)))(x, y)
self.assertEqual(output, x * y)
def test_nested_with_diag_embed(self):
x = torch.randn(3, 3, 5)
output = vmap(vmap(torch.diag_embed))(x)
self.assertEqual(output, torch.diag_embed(x))
def test_nested_with_different_map_dim(self):
x = torch.randn(2, 3)
y = torch.randn(5, 3)
output = vmap(lambda x: vmap(lambda y: x * y)(y))(x)
self.assertEqual(output.shape, (2, 5, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
z = torch.randn(7, 3)
output = vmap(lambda x: vmap(lambda y: vmap(lambda z: x * y * z)(z))(y))(x)
self.assertEqual(output.shape, (2, 5, 7, 3))
self.assertEqual(output, x.view(2, 1, 1, 3) * y.view(5, 1, 3) * z)
def test_noop_in_inner_vmap(self):
x = torch.randn(3)
y = torch.randn(5)
output = vmap(lambda x: vmap(lambda y: x)(y))(x)
self.assertEqual(output, x.view(3, 1).expand(3, 5))
def test_unsupported_op_err_msg(self):
tensor = torch.randn(2, 3)
msg = (
r"Batching rule not implemented for aten::.+; the "
r"fallback path doesn't work on out= or view ops"
)
def out_op(x, y):
return torch.abs(x, out=y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(out_op)(tensor, tensor)
with self.assertRaisesRegex(RuntimeError, 'Batching rule not implemented'):
vmap(torch.equal)(tensor, tensor)
def test_nonzero_out_dims(self):
tensor = torch.randn(2, 3)
result = vmap(lambda x: x, out_dims=1)(tensor)
self.assertEqual(result, tensor.permute(1, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 0, 3))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=-1)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 3, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
other = torch.randn(2, 3, 5, 7)
result = vmap(lambda x, y: (x, y), out_dims=2)(tensor, other)
self.assertEqual(result, (tensor.permute(1, 2, 0, 3), other.permute(1, 2, 0, 3)))
ndims = 64
shape = [2] + [1] * (ndims - 1)
expected_shape = [1, 1, 2] + [1] * (ndims - 3)
tensor = torch.randn(shape)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result.shape, expected_shape)
def foo(x, y):
return x, x * y, x * y * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=1)(x, y)
self.assertEqual(
result,
(x.permute(1, 0, 2), (x * y).permute(1, 0, 2), (x * y * y).permute(1, 0, 2)))
def test_multiple_out_dims(self):
def foo(x):
return x, x
def bar(x, y):
return x, x, x, x * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=(0, 1))(x)
self.assertEqual(result, (x, x.permute(1, 0, 2)))
result = vmap(bar, out_dims=(-1, 0, 1, 2))(x, y)
expected = (
x.permute(1, 2, 0),
x,
x.permute(1, 0, 2),
(x * y).permute(1, 2, 0),
)
self.assertEqual(result, expected)
def test_nested_out_dims(self):
y = torch.randn(2, 3, 5, 7)
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y))(y)
self.assertEqual(result.shape, (2, 5, 3, 7))
self.assertEqual(result, y.permute(0, 2, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y), out_dims=1)(y)
self.assertEqual(result.shape, (5, 2, 3, 7))
self.assertEqual(result, y.permute(2, 0, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=-1)(y), out_dims=-1)(y)
self.assertEqual(result.shape, (5, 7, 3, 2))
self.assertEqual(result, y.permute(2, 3, 1, 0))
x = torch.randn(2, 3)
y = torch.randn(5, 3)
result = vmap(lambda y: vmap(lambda x: x * y, out_dims=1)(x), out_dims=-1)(y)
self.assertEqual(result.shape, (3, 2, 5))
self.assertEqual(result, (y.view(5, 1, 3) * x).permute(2, 1, 0))
def test_out_dims_edge_case(self):
def foo(x):
return x
tensor = torch.randn(2, 3)
expected = vmap(foo, out_dims=1)(tensor)
result = vmap(foo, out_dims=(1,))(tensor)
self.assertEqual(result, expected)
def test_pytree_returns(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y), [y, (y, y)]
y0, (y1, y2), (y3, (y4, y5)) = vmap(f)(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y0, y1)
self.assertEqual(y2, y1)
self.assertEqual(y2, y3)
self.assertEqual(y4, y3)
self.assertEqual(y5, y4)
def test_pytree_odict_returns(self):
x = torch.randn(2, 3)
def f(t):
y = t.sin()
return OrderedDict([("sin", y), ("cos", t.cos())])
out = vmap(f)(x)
assert isinstance(out, OrderedDict)
expected = f(x)
self.assertEqual(out["sin"], expected["sin"])
self.assertEqual(out["cos"], expected["cos"])
def test_pytest_odict_flatten_unflatten(self):
from functorch._src.vmap import _odict_flatten, _odict_unflatten
x = torch.randn(2, 3)
inpt = OrderedDict([("sin", x.sin()), ("cos", x.cos())])
out = _odict_flatten(inpt)
self.assertEqual(out[0], list(inpt.values()))
self.assertEqual(out[1], list(inpt.keys()))
recon_inpt = _odict_unflatten(*out)
self.assertEqual(recon_inpt, inpt)
def test_pytree_returns_outdims(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, (0, 1)))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, x.sin())
self.assertEqual(y2, x.sin().t())
def test_pytree_returns_broadcast_simple(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=1)(x)
self.assertEqual(y0, x.sin().t())
self.assertEqual(y1, y0)
self.assertEqual(y2, y0)
def test_pytree_returns_broadcast_nested(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, 1))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, y0.t())
self.assertEqual(y2, y0.t())
def test_out_dims_must_be_int_or_collection_of_int_err_msg(self):
msg = 'must be an int or a python collection of ints'
tensor = torch.randn(2, 3)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims='lol')(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=('lol',))(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=None)(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(None,))(tensor)
def test_out_dims_and_num_outputs_mismatch_err_msg(self):
msg = 'not compatible'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0, 0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x), out_dims=(0,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0))(x)
def test_out_dim_out_of_bounds_err_msg(self):
msg = 'Dimension out of range'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=3)(x)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=-4)(x)
def test_non_zero_in_dims(self):
tensor = torch.randn(2, 3, 5)
output = vmap(lambda x: x, (1,))(tensor)
self.assertEqual(output, tensor.permute(1, 0, 2))
self.assertEqual(output.data_ptr(), tensor.data_ptr())
x = torch.randn(2, 3)
y = torch.randn(3, 2)
output = vmap(torch.mul, (0, 1))(x, y)
self.assertEqual(output, x * y.t())
output = vmap(torch.mul, (1, 0))(x, y)
self.assertEqual(output, x.t() * y)
def test_none_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (0, None))(x, y)
self.assertEqual(output.shape, (2, 2, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
output = vmap(torch.mul, (0, None))(x, 2)
self.assertEqual(output, x * 2)
def test_nested_non_default_in_dims(self):
x = torch.rand(5, 2, 3)
y = torch.rand(3, 5, 2)
result = vmap(vmap(vmap(torch.mul), (1, 0)), (1, 2))(x, y)
self.assertEqual(result, x.permute(1, 2, 0) * y.permute(2, 0, 1))
def test_nested_negative_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (-1, -1))(x, y)
self.assertEqual(output.shape, (3, 2))
self.assertEqual(output, (x * y).permute(1, 0))
def test_non_default_in_dims_out_dims(self):
x = torch.randn(2, 3, 5)
result = vmap(lambda x: x, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x)
self.assertEqual(result.data_ptr(), x.data_ptr())
result = vmap(lambda x: x, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, x.transpose(1, 2))
self.assertEqual(result.data_ptr(), x.data_ptr())
def foo(x):
return x * 2
result = vmap(foo, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x * 2)
result = vmap(foo, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, (x * 2).transpose(1, 2))
result = vmap(vmap(foo, 1, 1), 1, 1)(x)
self.assertEqual(result, x * 2)
def test_item_throws(self):
def f(x):
return x.item()
with self.assertRaisesRegex(RuntimeError, r'item\(\) on a Tensor'):
vmap(f)(torch.randn(3))
def test_data_dependent_control_flow_throws(self):
def f(x):
if x:
return x
return 0
with self.assertRaisesRegex(RuntimeError, r'data-dependent control flow'):
vmap(f)(torch.randn(3))
def test_accepts_nested_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
out = vmap(lambda z: z[0] + z[1])((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1])([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'])({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=(0,))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out_fn = vmap(lambda z: z['x'][0] + z['x'][1][0] + z['y'][0] + z['y'][1])
out = out_fn({'x': [x, (x,)], 'y': [y, y]})
self.assertEqual(out, x + x + y + y)
def test_in_dims_wrong_type_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'expected `in_dims` to be int or a \(potentially nested\) tuple'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, [0, 0])(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, set({0, 0}))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, 'lol')(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=[0, 0])([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_not_enough_in_dims_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'in_dims is not compatible with the structure of `inputs`'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0,))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0, 0, 0))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0],))([x, y])
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_integer_in_dim_but_not_tensor_input_err_msg(self):
def foo(xy):
return xy[0] * xy[1]
def bar(x, yz):
return x * yz[0] * yz[1]
x = torch.randn(2, 3)
msg = 'Got in_dim=0 for an input but the input is of type'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum)(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum, (0, 0))(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, 1])
vmap(torch.sum, (0, None))(x, 0)
def test_in_dim_not_in_tensor_err_msg(self):
def foo(x):
return x * x
x = torch.randn(2, 3)
y = torch.randn(2, 3)
msg = r'Got in_dim=-?\w for some input, but that input is a Tensor of dimensionality \w'
with self.assertRaisesRegex(ValueError, msg):
vmap(foo)(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(0,))(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(-3,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(2,))(y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([3, 0],))([x, y])
vmap(foo, in_dims=(0,))(torch.randn(2, 3))
vmap(foo, in_dims=(1,))(torch.randn(2, 3))
def test_fallback_does_not_warn_by_default(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
vmap(op)(x, y)
self.assertEqual(len(wa), 1)
@unittest.expectedFailure
def test_fallback_warns_when_warnings_are_enabled(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
with EnableVmapFallbackWarnings():
vmap(op)(x, y)
self.assertEqual(len(wa), 2)
self.assertRegex(str(wa[-1].message), FALLBACK_REGEX)
def _assert_uses_vmap_fallback(self, vmap_args, inputs):
return
def test_fallback_zero_dim(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0, B1 = 0, 3
x = torch.randn(B0, 11)
y = torch.randn(11)
msg = 'The fallback path does not support vmap over dims of size 0'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
x = torch.randn(B0, B1, 11)
y = torch.randn(B1, 11)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
def test_fallback_atan2(self):
op = torch.copysign
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(op, (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(100, 10, 10, 5)
y = torch.randn(100, 10, 10)
result = vmap(vmap(vmap(op)))(x, y)
self.assertEqual(result, op(x, y.view(100, 10, 10, 1)))
@unittest.skip
def test_fallback_masked_fill(self):
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 13)
self._assert_uses_vmap_fallback((torch.index_add, (0, None, None, 0)), (x, dim, index, values))
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(
x, dim + 1, index, values.view(B0, 3, 1, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
def test_fallback_multiple_returns(self):
B0, B1, B2 = 2, 3, 1237
tensor = torch.randn(B0, 10)
self._assert_uses_vmap_fallback((torch.var_mean,), (tensor,))
result = vmap(torch.var_mean)(tensor)
expected = torch.var_mean(tensor, dim=1)
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, 10)
result = vmap(vmap(torch.var_mean))(tensor)
expected = torch.var_mean(tensor, dim=2)
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, B2, 10)
result = vmap(vmap(vmap(torch.var_mean)))(tensor)
expected = torch.var_mean(tensor, dim=3)
self.assertEqual(result, expected)
def test_inplace_fallback_unary(self):
op = Tensor.acos_
B0, B1, B2 = 2, 3, 10000
x = torch.randn(B0, 5)
self._assert_uses_vmap_fallback((op,), (x,))
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op)(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op, out_dims=(1,))(x)
self.assertTrue(result._base is x)
self.assertEqual(result, x_orig.t().acos())
x_orig = torch.randn(B0, B1, 5)
x = x_orig.clone()
result = vmap(vmap(op))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
result = vmap(vmap(vmap(op)))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
def test_inplace_fallback_nary_same_levels(self):
op = Tensor.atan2_
outplace_op = torch.atan2
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0 = 5
x_orig = torch.randn(7, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, 7, 11)
vmap(op, (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim(0, 2)))
B0, B1 = 5, 7
x_orig = torch.randn(B1, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, B1, 11)
vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim([0, 1], [2, 0])))
B0, B1, B2 = 100, 10, 10
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
y = torch.randn(B0, B1, B2)
vmap(vmap(vmap(op)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, B1, B2, 1)))
@unittest.expectedFailure
def test_inplace_fallback_nary_different_levels(self):
op = Tensor.atan2_
outplace_op = torch.atan2
B0, B1 = 2, 3
x = torch.rand(B0, 7)
y = torch.rand(7)
self._assert_uses_vmap_fallback((op, (0, None)), (x, y))
x_orig = torch.rand(B0, 7)
x = x_orig.clone()
y = torch.rand(7)
vmap(op, in_dims=(0, None))(x, y)
self.assertEqual(x, outplace_op(x_orig, y))
x_orig = torch.rand(B0, B1, 7)
x = x_orig.clone()
y = torch.rand(B0, 7)
vmap(vmap(op, in_dims=(0, None)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, 1, 7)))
msg = r'vmap: aten::atan2_\(self, \*extra_args\) is not possible'
x = torch.rand(7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(7, B0)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 1))(x, y)
x = torch.rand(B0, 7)
y = torch.rand(B0, B1, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(None, 0)))(x, y)
def test_backward_unsupported_interaction(self):
x = torch.randn(3, requires_grad=True)
y = torch.randn(5)
grad = torch.randn_like(x)
err_msg = r'backward\(\) called inside a functorch transform'
def backward_on_vmapped_tensor(x):
x.sum().backward()
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_on_vmapped_tensor)(x)
def backward_with_vmapped_grad(x, grad):
x.backward(grad)
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_with_vmapped_grad)(x, grad)
def completely_unrelated_backward(y):
x.sum().backward()
return y
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(completely_unrelated_backward)(y)
@unittest.expectedFailure
def test_grad_unsupported_interaction(self):
input_tensor = torch.randn(3, requires_grad=True)
err_msg = 'autograd.grad.* called inside torch.vmap'
captured = torch.randn(3, requires_grad=True)
def output_to_grad_is_vmapped(input_tensor):
output = (captured * input_tensor).sum()
return torch.autograd.grad([output], [captured])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(output_to_grad_is_vmapped)(input_tensor)
output = (input_tensor ** 2).sum()
def input_to_grad_is_vmapped(input_tensor):
return torch.autograd.grad([output], [input_tensor])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(input_to_grad_is_vmapped)(input_tensor)
def test_batched_gradient_basic(self):
N = 3
x = torch.randn(N, requires_grad=True)
y = torch.randn(N)
def vjp_mul(v):
return torch.autograd.grad([x * y], [x], grad_outputs=[v])[0]
batched_v = torch.eye(N)
jacobian = vmap(vjp_mul)(batched_v)
self.assertEqual(jacobian, torch.diagflat(y))
def test_functools_partial(self):
x = torch.randn(3)
y = torch.randn(2, 3)
result = vmap(functools.partial(torch.mul, x))(y)
self.assertEqual(result, x * y)
def test_nn_module(self):
tensor = torch.randn(2, 3)
model = torch.nn.Linear(3, 3, bias=False)
result = vmap(model)(tensor)
self.assertEqual(result, model(tensor))
def test_fallback_with_undefined_grad(self):
B0 = 7
x = torch.randn(2, 3, 4, 5, requires_grad=True)
weight = torch.randn(3, 3, 1, 1)
v = torch.randn(B0, 2, 3, 4, 5)
def get_vjp(v):
result = torch.nn.functional.conv2d(x, weight)
grad_x, = torch.autograd.grad(result, x, v)
return grad_x
self._assert_uses_vmap_fallback([get_vjp], [v])
def test_reshape_dim_into(self):
x = torch.randn(2, 3, 5, 7)
y = reshape_dim_into(0, 0, x)
self.assertEqual(y, x.reshape(6, 5, 7))
y = reshape_dim_into(0, 1, x)
self.assertEqual(y, x.movedim(0, 1).reshape(3, 2 * 5, 7))
y = reshape_dim_into(0, 2, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
y = reshape_dim_into(1, 2, x)
self.assertEqual(y, x.movedim(1, 2).reshape(2, 5, 3 * 7))
y = reshape_dim_into(0, -2, x)
self.assertEqual(y, x.movedim(0, 1).reshape(3, 2 * 5, 7))
y = reshape_dim_into(0, -1, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
y = reshape_dim_into(-4, -1, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
def test_reshape_dim_outof(self):
x =
|
torch
|
torch.randn(12, 12, 12).permute(2, 1, 0)
|
y = reshape_dim_outof(0, 2, x)
self.assertEqual(y, x.reshape(2, 6, 12, 12))
|
torch
|
torch.randn.permute
|
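A quick, self-contained illustration of the target API in the record above (a torch.randn tensor followed by Tensor.permute). This is only a sketch; the shapes below are chosen for illustration and are not taken from the record. permute returns a view with the dimensions reordered rather than a copy, and reshape on the permuted (non-contiguous) result falls back to a copy when a view is not possible.

import torch

# Reorder dimensions with permute: the result is a view that shares
# storage with the original tensor, so no data is copied.
x = torch.randn(2, 3, 5)
y = x.permute(2, 1, 0)                 # shape becomes (5, 3, 2)
assert y.shape == (5, 3, 2)
assert y.data_ptr() == x.data_ptr()    # same underlying storage

# The permuted tensor is non-contiguous; reshape() silently copies
# when a view is impossible, unlike view(), which would raise here.
z = y.reshape(5, 6)
assert z.shape == (5, 6)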
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapAPI(TestCase):
def test_non_tensor_output_raises(self):
with self.assertRaisesRegex(ValueError, "got type <class 'float'> as a return"):
vmap(lambda x: 3.14)(torch.ones(3))
def multiple_outputs(x):
return x, 3
with self.assertRaisesRegex(ValueError, "got type <class 'int'> as a return"):
vmap(multiple_outputs)(torch.ones(3))
def test_different_map_dim_size_raises(self):
x = torch.randn(2)
y = torch.randn(3)
expected_msg = 'Expected all tensors to have the same size in the mapped dimension'
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(torch.mul)(x, y)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
def test_func_with_no_inputs(self):
expected_msg = 'got no inputs'
def foo():
return torch.randn(3)
def bar(x):
return torch.randn(3)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(foo)()
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(bar)()
def test_constant_function(self):
output = vmap(lambda x: torch.tensor(3.14))(torch.ones(3))
self.assertEqual(output, torch.tensor([3.14, 3.14, 3.14]))
def test_single_input(self):
x = torch.randn(2, 3)
def square(x):
return x * x
output = vmap(square)(x)
self.assertEqual(output, x * x)
def test_multiple_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul)(x, y)
self.assertEqual(output, x * y)
def test_multiple_outputs(self):
def foo(x):
return x * x, x * x * x
x = torch.randn(3)
outputs = vmap(foo)(x)
self.assertEqual(outputs[0], x * x)
self.assertEqual(outputs[1], x * x * x)
def test_multiple_outputs2(self):
def returns_tuple_of_tensors(x):
return (x, x)
def returns_list_of_two_tensors(x):
return [x, x]
def returns_list_of_one_tensor(x):
return [x]
x = torch.randn(3)
vmap(returns_tuple_of_tensors)(x)
vmap(returns_list_of_two_tensors)(x)
vmap(returns_list_of_one_tensor)(x)
def test_nested_with_same_map_dim(self):
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
output = vmap(vmap(torch.mul))(x, y)
self.assertEqual(output, x * y)
output = vmap(vmap(vmap(torch.mul)))(x, y)
self.assertEqual(output, x * y)
def test_nested_with_diag_embed(self):
x = torch.randn(3, 3, 5)
output = vmap(vmap(torch.diag_embed))(x)
self.assertEqual(output, torch.diag_embed(x))
def test_nested_with_different_map_dim(self):
x = torch.randn(2, 3)
y = torch.randn(5, 3)
output = vmap(lambda x: vmap(lambda y: x * y)(y))(x)
self.assertEqual(output.shape, (2, 5, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
z = torch.randn(7, 3)
output = vmap(lambda x: vmap(lambda y: vmap(lambda z: x * y * z)(z))(y))(x)
self.assertEqual(output.shape, (2, 5, 7, 3))
self.assertEqual(output, x.view(2, 1, 1, 3) * y.view(5, 1, 3) * z)
def test_noop_in_inner_vmap(self):
x = torch.randn(3)
y = torch.randn(5)
output = vmap(lambda x: vmap(lambda y: x)(y))(x)
self.assertEqual(output, x.view(3, 1).expand(3, 5))
def test_unsupported_op_err_msg(self):
tensor = torch.randn(2, 3)
msg = (
r"Batching rule not implemented for aten::.+; the "
r"fallback path doesn't work on out= or view ops"
)
def out_op(x, y):
return torch.abs(x, out=y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(out_op)(tensor, tensor)
with self.assertRaisesRegex(RuntimeError, 'Batching rule not implemented'):
vmap(torch.equal)(tensor, tensor)
def test_nonzero_out_dims(self):
tensor = torch.randn(2, 3)
result = vmap(lambda x: x, out_dims=1)(tensor)
self.assertEqual(result, tensor.permute(1, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 0, 3))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=-1)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 3, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
other = torch.randn(2, 3, 5, 7)
result = vmap(lambda x, y: (x, y), out_dims=2)(tensor, other)
self.assertEqual(result, (tensor.permute(1, 2, 0, 3), other.permute(1, 2, 0, 3)))
ndims = 64
shape = [2] + [1] * (ndims - 1)
expected_shape = [1, 1, 2] + [1] * (ndims - 3)
tensor = torch.randn(shape)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result.shape, expected_shape)
def foo(x, y):
return x, x * y, x * y * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=1)(x, y)
self.assertEqual(
result,
(x.permute(1, 0, 2), (x * y).permute(1, 0, 2), (x * y * y).permute(1, 0, 2)))
def test_multiple_out_dims(self):
def foo(x):
return x, x
def bar(x, y):
return x, x, x, x * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=(0, 1))(x)
self.assertEqual(result, (x, x.permute(1, 0, 2)))
result = vmap(bar, out_dims=(-1, 0, 1, 2))(x, y)
expected = (
x.permute(1, 2, 0),
x,
x.permute(1, 0, 2),
(x * y).permute(1, 2, 0),
)
self.assertEqual(result, expected)
def test_nested_out_dims(self):
y = torch.randn(2, 3, 5, 7)
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y))(y)
self.assertEqual(result.shape, (2, 5, 3, 7))
self.assertEqual(result, y.permute(0, 2, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y), out_dims=1)(y)
self.assertEqual(result.shape, (5, 2, 3, 7))
self.assertEqual(result, y.permute(2, 0, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=-1)(y), out_dims=-1)(y)
self.assertEqual(result.shape, (5, 7, 3, 2))
self.assertEqual(result, y.permute(2, 3, 1, 0))
x = torch.randn(2, 3)
y = torch.randn(5, 3)
result = vmap(lambda y: vmap(lambda x: x * y, out_dims=1)(x), out_dims=-1)(y)
self.assertEqual(result.shape, (3, 2, 5))
self.assertEqual(result, (y.view(5, 1, 3) * x).permute(2, 1, 0))
def test_out_dims_edge_case(self):
def foo(x):
return x
tensor = torch.randn(2, 3)
expected = vmap(foo, out_dims=1)(tensor)
result = vmap(foo, out_dims=(1,))(tensor)
self.assertEqual(result, expected)
def test_pytree_returns(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y), [y, (y, y)]
y0, (y1, y2), (y3, (y4, y5)) = vmap(f)(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y0, y1)
self.assertEqual(y2, y1)
self.assertEqual(y2, y3)
self.assertEqual(y4, y3)
self.assertEqual(y5, y4)
def test_pytree_odict_returns(self):
x = torch.randn(2, 3)
def f(t):
y = t.sin()
return OrderedDict([("sin", y), ("cos", t.cos())])
out = vmap(f)(x)
assert isinstance(out, OrderedDict)
expected = f(x)
self.assertEqual(out["sin"], expected["sin"])
self.assertEqual(out["cos"], expected["cos"])
def test_pytest_odict_flatten_unflatten(self):
from functorch._src.vmap import _odict_flatten, _odict_unflatten
x = torch.randn(2, 3)
inpt = OrderedDict([("sin", x.sin()), ("cos", x.cos())])
out = _odict_flatten(inpt)
self.assertEqual(out[0], list(inpt.values()))
self.assertEqual(out[1], list(inpt.keys()))
recon_inpt = _odict_unflatten(*out)
self.assertEqual(recon_inpt, inpt)
def test_pytree_returns_outdims(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, (0, 1)))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, x.sin())
self.assertEqual(y2, x.sin().t())
def test_pytree_returns_broadcast_simple(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=1)(x)
self.assertEqual(y0, x.sin().t())
self.assertEqual(y1, y0)
self.assertEqual(y2, y0)
def test_pytree_returns_broadcast_nested(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, 1))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, y0.t())
self.assertEqual(y2, y0.t())
def test_out_dims_must_be_int_or_collection_of_int_err_msg(self):
msg = 'must be an int or a python collection of ints'
tensor = torch.randn(2, 3)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims='lol')(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=('lol',))(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=None)(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(None,))(tensor)
def test_out_dims_and_num_outputs_mismatch_err_msg(self):
msg = 'not compatible'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0, 0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x), out_dims=(0,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0))(x)
def test_out_dim_out_of_bounds_err_msg(self):
msg = 'Dimension out of range'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=3)(x)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=-4)(x)
def test_non_zero_in_dims(self):
tensor = torch.randn(2, 3, 5)
output = vmap(lambda x: x, (1,))(tensor)
self.assertEqual(output, tensor.permute(1, 0, 2))
self.assertEqual(output.data_ptr(), tensor.data_ptr())
x = torch.randn(2, 3)
y = torch.randn(3, 2)
output = vmap(torch.mul, (0, 1))(x, y)
self.assertEqual(output, x * y.t())
output = vmap(torch.mul, (1, 0))(x, y)
self.assertEqual(output, x.t() * y)
def test_none_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (0, None))(x, y)
self.assertEqual(output.shape, (2, 2, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
output = vmap(torch.mul, (0, None))(x, 2)
self.assertEqual(output, x * 2)
def test_nested_non_default_in_dims(self):
x = torch.rand(5, 2, 3)
y = torch.rand(3, 5, 2)
result = vmap(vmap(vmap(torch.mul), (1, 0)), (1, 2))(x, y)
self.assertEqual(result, x.permute(1, 2, 0) * y.permute(2, 0, 1))
def test_nested_negative_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (-1, -1))(x, y)
self.assertEqual(output.shape, (3, 2))
self.assertEqual(output, (x * y).permute(1, 0))
def test_non_default_in_dims_out_dims(self):
x = torch.randn(2, 3, 5)
result = vmap(lambda x: x, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x)
self.assertEqual(result.data_ptr(), x.data_ptr())
result = vmap(lambda x: x, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, x.transpose(1, 2))
self.assertEqual(result.data_ptr(), x.data_ptr())
def foo(x):
return x * 2
result = vmap(foo, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x * 2)
result = vmap(foo, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, (x * 2).transpose(1, 2))
result = vmap(vmap(foo, 1, 1), 1, 1)(x)
self.assertEqual(result, x * 2)
def test_item_throws(self):
def f(x):
return x.item()
with self.assertRaisesRegex(RuntimeError, r'item\(\) on a Tensor'):
vmap(f)(torch.randn(3))
def test_data_dependent_control_flow_throws(self):
def f(x):
if x:
return x
return 0
with self.assertRaisesRegex(RuntimeError, r'data-dependent control flow'):
vmap(f)(torch.randn(3))
def test_accepts_nested_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
out = vmap(lambda z: z[0] + z[1])((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1])([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'])({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=(0,))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out_fn = vmap(lambda z: z['x'][0] + z['x'][1][0] + z['y'][0] + z['y'][1])
out = out_fn({'x': [x, (x,)], 'y': [y, y]})
self.assertEqual(out, x + x + y + y)
def test_in_dims_wrong_type_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'expected `in_dims` to be int or a \(potentially nested\) tuple'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, [0, 0])(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, set({0, 0}))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, 'lol')(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=[0, 0])([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_not_enough_in_dims_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'in_dims is not compatible with the structure of `inputs`'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0,))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0, 0, 0))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0],))([x, y])
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_integer_in_dim_but_not_tensor_input_err_msg(self):
def foo(xy):
return xy[0] * xy[1]
def bar(x, yz):
return x * yz[0] * yz[1]
x = torch.randn(2, 3)
msg = 'Got in_dim=0 for an input but the input is of type'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum)(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum, (0, 0))(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, 1])
vmap(torch.sum, (0, None))(x, 0)
def test_in_dim_not_in_tensor_err_msg(self):
def foo(x):
return x * x
x = torch.randn(2, 3)
y = torch.randn(2, 3)
msg = r'Got in_dim=-?\w for some input, but that input is a Tensor of dimensionality \w'
with self.assertRaisesRegex(ValueError, msg):
vmap(foo)(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(0,))(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(-3,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(2,))(y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([3, 0],))([x, y])
vmap(foo, in_dims=(0,))(torch.randn(2, 3))
vmap(foo, in_dims=(1,))(torch.randn(2, 3))
def test_fallback_does_not_warn_by_default(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
vmap(op)(x, y)
self.assertEqual(len(wa), 1)
@unittest.expectedFailure
def test_fallback_warns_when_warnings_are_enabled(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
with EnableVmapFallbackWarnings():
vmap(op)(x, y)
self.assertEqual(len(wa), 2)
self.assertRegex(str(wa[-1].message), FALLBACK_REGEX)
def _assert_uses_vmap_fallback(self, vmap_args, inputs):
return
def test_fallback_zero_dim(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0, B1 = 0, 3
x = torch.randn(B0, 11)
y = torch.randn(11)
msg = 'The fallback path does not support vmap over dims of size 0'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
x = torch.randn(B0, B1, 11)
y = torch.randn(B1, 11)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
def test_fallback_atan2(self):
op = torch.copysign
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(op, (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(100, 10, 10, 5)
y = torch.randn(100, 10, 10)
result = vmap(vmap(vmap(op)))(x, y)
self.assertEqual(result, op(x, y.view(100, 10, 10, 1)))
@unittest.skip
def test_fallback_masked_fill(self):
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 13)
self._assert_uses_vmap_fallback((torch.index_add, (0, None, None, 0)), (x, dim, index, values))
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(
x, dim + 1, index, values.view(B0, 3, 1, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
def test_fallback_multiple_returns(self):
B0, B1, B2 = 2, 3, 1237
tensor = torch.randn(B0, 10)
self._assert_uses_vmap_fallback((torch.var_mean,), (tensor,))
result = vmap(torch.var_mean)(tensor)
expected = torch.var_mean(tensor, dim=1)
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, 10)
result = vmap(vmap(torch.var_mean))(tensor)
expected = torch.var_mean(tensor, dim=2)
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, B2, 10)
result = vmap(vmap(vmap(torch.var_mean)))(tensor)
expected = torch.var_mean(tensor, dim=3)
self.assertEqual(result, expected)
def test_inplace_fallback_unary(self):
op = Tensor.acos_
B0, B1, B2 = 2, 3, 10000
x = torch.randn(B0, 5)
self._assert_uses_vmap_fallback((op,), (x,))
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op)(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op, out_dims=(1,))(x)
self.assertTrue(result._base is x)
self.assertEqual(result, x_orig.t().acos())
x_orig = torch.randn(B0, B1, 5)
x = x_orig.clone()
result = vmap(vmap(op))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
result = vmap(vmap(vmap(op)))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
def test_inplace_fallback_nary_same_levels(self):
op = Tensor.atan2_
outplace_op = torch.atan2
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0 = 5
x_orig = torch.randn(7, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, 7, 11)
vmap(op, (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim(0, 2)))
B0, B1 = 5, 7
x_orig = torch.randn(B1, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, B1, 11)
vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim([0, 1], [2, 0])))
B0, B1, B2 = 100, 10, 10
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
y = torch.randn(B0, B1, B2)
vmap(vmap(vmap(op)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, B1, B2, 1)))
@unittest.expectedFailure
def test_inplace_fallback_nary_different_levels(self):
op = Tensor.atan2_
outplace_op = torch.atan2
B0, B1 = 2, 3
x = torch.rand(B0, 7)
y = torch.rand(7)
self._assert_uses_vmap_fallback((op, (0, None)), (x, y))
x_orig = torch.rand(B0, 7)
x = x_orig.clone()
y = torch.rand(7)
vmap(op, in_dims=(0, None))(x, y)
self.assertEqual(x, outplace_op(x_orig, y))
x_orig = torch.rand(B0, B1, 7)
x = x_orig.clone()
y = torch.rand(B0, 7)
vmap(vmap(op, in_dims=(0, None)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, 1, 7)))
msg = r'vmap: aten::atan2_\(self, \*extra_args\) is not possible'
x = torch.rand(7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(7, B0)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 1))(x, y)
x = torch.rand(B0, 7)
y = torch.rand(B0, B1, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(None, 0)))(x, y)
def test_backward_unsupported_interaction(self):
x = torch.randn(3, requires_grad=True)
y = torch.randn(5)
grad = torch.randn_like(x)
err_msg = r'backward\(\) called inside a functorch transform'
def backward_on_vmapped_tensor(x):
x.sum().backward()
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_on_vmapped_tensor)(x)
def backward_with_vmapped_grad(x, grad):
x.backward(grad)
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_with_vmapped_grad)(x, grad)
def completely_unrelated_backward(y):
x.sum().backward()
return y
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(completely_unrelated_backward)(y)
@unittest.expectedFailure
def test_grad_unsupported_interaction(self):
input_tensor = torch.randn(3, requires_grad=True)
err_msg = 'autograd.grad.* called inside torch.vmap'
captured = torch.randn(3, requires_grad=True)
def output_to_grad_is_vmapped(input_tensor):
output = (captured * input_tensor).sum()
return torch.autograd.grad([output], [captured])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(output_to_grad_is_vmapped)(input_tensor)
output = (input_tensor ** 2).sum()
def input_to_grad_is_vmapped(input_tensor):
return torch.autograd.grad([output], [input_tensor])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(input_to_grad_is_vmapped)(input_tensor)
def test_batched_gradient_basic(self):
N = 3
x = torch.randn(N, requires_grad=True)
y = torch.randn(N)
def vjp_mul(v):
return torch.autograd.grad([x * y], [x], grad_outputs=[v])[0]
batched_v = torch.eye(N)
jacobian = vmap(vjp_mul)(batched_v)
self.assertEqual(jacobian, torch.diagflat(y))
def test_functools_partial(self):
x = torch.randn(3)
y = torch.randn(2, 3)
result = vmap(functools.partial(torch.mul, x))(y)
self.assertEqual(result, x * y)
def test_nn_module(self):
tensor = torch.randn(2, 3)
model = torch.nn.Linear(3, 3, bias=False)
result = vmap(model)(tensor)
self.assertEqual(result, model(tensor))
def test_fallback_with_undefined_grad(self):
B0 = 7
x = torch.randn(2, 3, 4, 5, requires_grad=True)
weight = torch.randn(3, 3, 1, 1)
v = torch.randn(B0, 2, 3, 4, 5)
def get_vjp(v):
result = torch.nn.functional.conv2d(x, weight)
grad_x, = torch.autograd.grad(result, x, v)
return grad_x
self._assert_uses_vmap_fallback([get_vjp], [v])
def test_reshape_dim_into(self):
x = torch.randn(2, 3, 5, 7)
y = reshape_dim_into(0, 0, x)
self.assertEqual(y, x.reshape(6, 5, 7))
y = reshape_dim_into(0, 1, x)
self.assertEqual(y, x.movedim(0, 1).reshape(3, 2 * 5, 7))
y = reshape_dim_into(0, 2, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
y = reshape_dim_into(1, 2, x)
self.assertEqual(y, x.movedim(1, 2).reshape(2, 5, 3 * 7))
y = reshape_dim_into(0, -2, x)
self.assertEqual(y, x.movedim(0, 1).reshape(3, 2 * 5, 7))
y = reshape_dim_into(0, -1, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
y = reshape_dim_into(-4, -1, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
def test_reshape_dim_outof(self):
x = torch.randn(12, 12, 12).permute(2, 1, 0)
y = reshape_dim_outof(0, 2, x)
self.assertEqual(y, x.reshape(2, 6, 12, 12))
y = reshape_dim_outof(1, 4, x)
self.assertEqual(y, x.reshape(12, 4, 3, 12))
y = reshape_dim_outof(2, 6, x)
self.assertEqual(y, x.reshape(12, 12, 6, 2))
y = reshape_dim_outof(-1, 6, x)
self.assertEqual(y, x.reshape(12, 12, 6, 2))
def test_batch_rule_does_not_need_to_handle_no_batched_input(self):
def f(x, y):
res =
|
torch
|
torch.dot(y, torch.ones(2))
|
return x + res
x = torch.randn(7, 5)
y = torch.randn(3, 2)
|
torch
|
torch.dot
|
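A quick, self-contained illustration of the target API in the record above (torch.dot). Again just a sketch with made-up values: torch.dot takes two 1-D tensors and returns their inner product as a 0-D tensor, and a batch of per-row dot products (what vmapping torch.dot over the leading dimension produces) can equivalently be written with einsum.

import torch

# Inner product of two 1-D tensors; the result is a 0-D tensor.
a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([4.0, 5.0, 6.0])
assert torch.dot(a, b).item() == 32.0   # 1*4 + 2*5 + 3*6

# torch.dot only accepts 1-D inputs. One dot product per row of a
# batch can be expressed with einsum instead.
A = torch.randn(4, 3)
B = torch.randn(4, 3)
per_row = torch.einsum('bi,bi->b', A, B)  # shape (4,)
assert per_row.shape == (4,)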
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapAPI(TestCase):
def test_non_tensor_output_raises(self):
with self.assertRaisesRegex(ValueError, "got type <class 'float'> as a return"):
vmap(lambda x: 3.14)(torch.ones(3))
def multiple_outputs(x):
return x, 3
with self.assertRaisesRegex(ValueError, "got type <class 'int'> as a return"):
vmap(multiple_outputs)(torch.ones(3))
def test_different_map_dim_size_raises(self):
x = torch.randn(2)
y = torch.randn(3)
expected_msg = 'Expected all tensors to have the same size in the mapped dimension'
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(torch.mul)(x, y)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
def test_func_with_no_inputs(self):
expected_msg = 'got no inputs'
def foo():
return torch.randn(3)
def bar(x):
return torch.randn(3)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(foo)()
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(bar)()
def test_constant_function(self):
output = vmap(lambda x: torch.tensor(3.14))(torch.ones(3))
self.assertEqual(output, torch.tensor([3.14, 3.14, 3.14]))
def test_single_input(self):
x = torch.randn(2, 3)
def square(x):
return x * x
output = vmap(square)(x)
self.assertEqual(output, x * x)
def test_multiple_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul)(x, y)
self.assertEqual(output, x * y)
def test_multiple_outputs(self):
def foo(x):
return x * x, x * x * x
x = torch.randn(3)
outputs = vmap(foo)(x)
self.assertEqual(outputs[0], x * x)
self.assertEqual(outputs[1], x * x * x)
def test_multiple_outputs2(self):
def returns_tuple_of_tensors(x):
return (x, x)
def returns_list_of_two_tensors(x):
return [x, x]
def returns_list_of_one_tensor(x):
return [x]
x = torch.randn(3)
vmap(returns_tuple_of_tensors)(x)
vmap(returns_list_of_two_tensors)(x)
vmap(returns_list_of_one_tensor)(x)
def test_nested_with_same_map_dim(self):
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
output = vmap(vmap(torch.mul))(x, y)
self.assertEqual(output, x * y)
output = vmap(vmap(vmap(torch.mul)))(x, y)
self.assertEqual(output, x * y)
def test_nested_with_diag_embed(self):
x = torch.randn(3, 3, 5)
output = vmap(vmap(torch.diag_embed))(x)
self.assertEqual(output, torch.diag_embed(x))
def test_nested_with_different_map_dim(self):
x = torch.randn(2, 3)
y = torch.randn(5, 3)
output = vmap(lambda x: vmap(lambda y: x * y)(y))(x)
self.assertEqual(output.shape, (2, 5, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
z = torch.randn(7, 3)
output = vmap(lambda x: vmap(lambda y: vmap(lambda z: x * y * z)(z))(y))(x)
self.assertEqual(output.shape, (2, 5, 7, 3))
self.assertEqual(output, x.view(2, 1, 1, 3) * y.view(5, 1, 3) * z)
def test_noop_in_inner_vmap(self):
x = torch.randn(3)
y = torch.randn(5)
output = vmap(lambda x: vmap(lambda y: x)(y))(x)
self.assertEqual(output, x.view(3, 1).expand(3, 5))
def test_unsupported_op_err_msg(self):
tensor = torch.randn(2, 3)
msg = (
r"Batching rule not implemented for aten::.+; the "
r"fallback path doesn't work on out= or view ops"
)
def out_op(x, y):
return torch.abs(x, out=y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(out_op)(tensor, tensor)
with self.assertRaisesRegex(RuntimeError, 'Batching rule not implemented'):
vmap(torch.equal)(tensor, tensor)
def test_nonzero_out_dims(self):
tensor = torch.randn(2, 3)
result = vmap(lambda x: x, out_dims=1)(tensor)
self.assertEqual(result, tensor.permute(1, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 0, 3))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=-1)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 3, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
other = torch.randn(2, 3, 5, 7)
result = vmap(lambda x, y: (x, y), out_dims=2)(tensor, other)
self.assertEqual(result, (tensor.permute(1, 2, 0, 3), other.permute(1, 2, 0, 3)))
ndims = 64
shape = [2] + [1] * (ndims - 1)
expected_shape = [1, 1, 2] + [1] * (ndims - 3)
tensor = torch.randn(shape)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result.shape, expected_shape)
def foo(x, y):
return x, x * y, x * y * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=1)(x, y)
self.assertEqual(
result,
(x.permute(1, 0, 2), (x * y).permute(1, 0, 2), (x * y * y).permute(1, 0, 2)))
def test_multiple_out_dims(self):
def foo(x):
return x, x
def bar(x, y):
return x, x, x, x * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=(0, 1))(x)
self.assertEqual(result, (x, x.permute(1, 0, 2)))
result = vmap(bar, out_dims=(-1, 0, 1, 2))(x, y)
expected = (
x.permute(1, 2, 0),
x,
x.permute(1, 0, 2),
(x * y).permute(1, 2, 0),
)
self.assertEqual(result, expected)
def test_nested_out_dims(self):
y = torch.randn(2, 3, 5, 7)
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y))(y)
self.assertEqual(result.shape, (2, 5, 3, 7))
self.assertEqual(result, y.permute(0, 2, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y), out_dims=1)(y)
self.assertEqual(result.shape, (5, 2, 3, 7))
self.assertEqual(result, y.permute(2, 0, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=-1)(y), out_dims=-1)(y)
self.assertEqual(result.shape, (5, 7, 3, 2))
self.assertEqual(result, y.permute(2, 3, 1, 0))
x = torch.randn(2, 3)
y = torch.randn(5, 3)
result = vmap(lambda y: vmap(lambda x: x * y, out_dims=1)(x), out_dims=-1)(y)
self.assertEqual(result.shape, (3, 2, 5))
self.assertEqual(result, (y.view(5, 1, 3) * x).permute(2, 1, 0))
def test_out_dims_edge_case(self):
def foo(x):
return x
tensor = torch.randn(2, 3)
expected = vmap(foo, out_dims=1)(tensor)
result = vmap(foo, out_dims=(1,))(tensor)
self.assertEqual(result, expected)
def test_pytree_returns(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y), [y, (y, y)]
y0, (y1, y2), (y3, (y4, y5)) = vmap(f)(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y0, y1)
self.assertEqual(y2, y1)
self.assertEqual(y2, y3)
self.assertEqual(y4, y3)
self.assertEqual(y5, y4)
def test_pytree_odict_returns(self):
x = torch.randn(2, 3)
def f(t):
y = t.sin()
return OrderedDict([("sin", y), ("cos", t.cos())])
out = vmap(f)(x)
assert isinstance(out, OrderedDict)
expected = f(x)
self.assertEqual(out["sin"], expected["sin"])
self.assertEqual(out["cos"], expected["cos"])
def test_pytest_odict_flatten_unflatten(self):
from functorch._src.vmap import _odict_flatten, _odict_unflatten
x = torch.randn(2, 3)
inpt = OrderedDict([("sin", x.sin()), ("cos", x.cos())])
out = _odict_flatten(inpt)
self.assertEqual(out[0], list(inpt.values()))
self.assertEqual(out[1], list(inpt.keys()))
recon_inpt = _odict_unflatten(*out)
self.assertEqual(recon_inpt, inpt)
def test_pytree_returns_outdims(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, (0, 1)))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, x.sin())
self.assertEqual(y2, x.sin().t())
def test_pytree_returns_broadcast_simple(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=1)(x)
self.assertEqual(y0, x.sin().t())
self.assertEqual(y1, y0)
self.assertEqual(y2, y0)
def test_pytree_returns_broadcast_nested(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, 1))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, y0.t())
self.assertEqual(y2, y0.t())
def test_out_dims_must_be_int_or_collection_of_int_err_msg(self):
msg = 'must be an int or a python collection of ints'
tensor = torch.randn(2, 3)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims='lol')(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=('lol',))(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=None)(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(None,))(tensor)
def test_out_dims_and_num_outputs_mismatch_err_msg(self):
msg = 'not compatible'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0, 0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x), out_dims=(0,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0))(x)
def test_out_dim_out_of_bounds_err_msg(self):
msg = 'Dimension out of range'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=3)(x)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=-4)(x)
def test_non_zero_in_dims(self):
tensor = torch.randn(2, 3, 5)
output = vmap(lambda x: x, (1,))(tensor)
self.assertEqual(output, tensor.permute(1, 0, 2))
self.assertEqual(output.data_ptr(), tensor.data_ptr())
x = torch.randn(2, 3)
y = torch.randn(3, 2)
output = vmap(torch.mul, (0, 1))(x, y)
self.assertEqual(output, x * y.t())
output = vmap(torch.mul, (1, 0))(x, y)
self.assertEqual(output, x.t() * y)
def test_none_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (0, None))(x, y)
self.assertEqual(output.shape, (2, 2, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
output = vmap(torch.mul, (0, None))(x, 2)
self.assertEqual(output, x * 2)
def test_nested_non_default_in_dims(self):
x = torch.rand(5, 2, 3)
y = torch.rand(3, 5, 2)
result = vmap(vmap(vmap(torch.mul), (1, 0)), (1, 2))(x, y)
self.assertEqual(result, x.permute(1, 2, 0) * y.permute(2, 0, 1))
def test_nested_negative_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (-1, -1))(x, y)
self.assertEqual(output.shape, (3, 2))
self.assertEqual(output, (x * y).permute(1, 0))
def test_non_default_in_dims_out_dims(self):
x = torch.randn(2, 3, 5)
result = vmap(lambda x: x, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x)
self.assertEqual(result.data_ptr(), x.data_ptr())
result = vmap(lambda x: x, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, x.transpose(1, 2))
self.assertEqual(result.data_ptr(), x.data_ptr())
def foo(x):
return x * 2
result = vmap(foo, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x * 2)
result = vmap(foo, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, (x * 2).transpose(1, 2))
result = vmap(vmap(foo, 1, 1), 1, 1)(x)
self.assertEqual(result, x * 2)
def test_item_throws(self):
def f(x):
return x.item()
with self.assertRaisesRegex(RuntimeError, r'item\(\) on a Tensor'):
vmap(f)(torch.randn(3))
def test_data_dependent_control_flow_throws(self):
def f(x):
if x:
return x
return 0
with self.assertRaisesRegex(RuntimeError, r'data-dependent control flow'):
vmap(f)(torch.randn(3))
def test_accepts_nested_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
out = vmap(lambda z: z[0] + z[1])((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1])([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'])({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=(0,))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out_fn = vmap(lambda z: z['x'][0] + z['x'][1][0] + z['y'][0] + z['y'][1])
out = out_fn({'x': [x, (x,)], 'y': [y, y]})
self.assertEqual(out, x + x + y + y)
def test_in_dims_wrong_type_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'expected `in_dims` to be int or a \(potentially nested\) tuple'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, [0, 0])(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, set({0, 0}))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, 'lol')(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=[0, 0])([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_not_enough_in_dims_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'in_dims is not compatible with the structure of `inputs`'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0,))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0, 0, 0))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0],))([x, y])
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_integer_in_dim_but_not_tensor_input_err_msg(self):
def foo(xy):
return xy[0] * xy[1]
def bar(x, yz):
return x * yz[0] * yz[1]
x = torch.randn(2, 3)
msg = 'Got in_dim=0 for an input but the input is of type'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum)(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum, (0, 0))(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, 1])
vmap(torch.sum, (0, None))(x, 0)
def test_in_dim_not_in_tensor_err_msg(self):
def foo(x):
return x * x
x = torch.randn(2, 3)
y = torch.randn(2, 3)
msg = r'Got in_dim=-?\w for some input, but that input is a Tensor of dimensionality \w'
with self.assertRaisesRegex(ValueError, msg):
vmap(foo)(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(0,))(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(-3,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(2,))(y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([3, 0],))([x, y])
vmap(foo, in_dims=(0,))(torch.randn(2, 3))
vmap(foo, in_dims=(1,))(torch.randn(2, 3))
def test_fallback_does_not_warn_by_default(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
vmap(op)(x, y)
self.assertEqual(len(wa), 1)
@unittest.expectedFailure
def test_fallback_warns_when_warnings_are_enabled(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
with EnableVmapFallbackWarnings():
vmap(op)(x, y)
self.assertEqual(len(wa), 2)
self.assertRegex(str(wa[-1].message), FALLBACK_REGEX)
def _assert_uses_vmap_fallback(self, vmap_args, inputs):
return
def test_fallback_zero_dim(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0, B1 = 0, 3
x = torch.randn(B0, 11)
y = torch.randn(11)
msg = 'The fallback path does not support vmap over dims of size 0'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
x = torch.randn(B0, B1, 11)
y = torch.randn(B1, 11)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
def test_fallback_atan2(self):
op = torch.copysign
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(op, (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(100, 10, 10, 5)
y = torch.randn(100, 10, 10)
result = vmap(vmap(vmap(op)))(x, y)
self.assertEqual(result, op(x, y.view(100, 10, 10, 1)))
@unittest.skip
def test_fallback_masked_fill(self):
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 13)
self._assert_uses_vmap_fallback((torch.index_add, (0, None, None, 0)), (x, dim, index, values))
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(
x, dim + 1, index, values.view(B0, 3, 1, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
def test_fallback_multiple_returns(self):
B0, B1, B2 = 2, 3, 1237
tensor = torch.randn(B0, 10)
self._assert_uses_vmap_fallback((torch.var_mean,), (tensor,))
result = vmap(torch.var_mean)(tensor)
expected = torch.var_mean(tensor, dim=1)
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, 10)
result = vmap(vmap(torch.var_mean))(tensor)
expected = torch.var_mean(tensor, dim=2)
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, B2, 10)
result = vmap(vmap(vmap(torch.var_mean)))(tensor)
expected = torch.var_mean(tensor, dim=3)
self.assertEqual(result, expected)
def test_inplace_fallback_unary(self):
op = Tensor.acos_
B0, B1, B2 = 2, 3, 10000
x = torch.randn(B0, 5)
self._assert_uses_vmap_fallback((op,), (x,))
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op)(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op, out_dims=(1,))(x)
self.assertTrue(result._base is x)
self.assertEqual(result, x_orig.t().acos())
x_orig = torch.randn(B0, B1, 5)
x = x_orig.clone()
result = vmap(vmap(op))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
result = vmap(vmap(vmap(op)))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
def test_inplace_fallback_nary_same_levels(self):
op = Tensor.atan2_
outplace_op = torch.atan2
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0 = 5
x_orig = torch.randn(7, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, 7, 11)
vmap(op, (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim(0, 2)))
B0, B1 = 5, 7
x_orig = torch.randn(B1, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, B1, 11)
vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim([0, 1], [2, 0])))
B0, B1, B2 = 100, 10, 10
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
y = torch.randn(B0, B1, B2)
vmap(vmap(vmap(op)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, B1, B2, 1)))
@unittest.expectedFailure
def test_inplace_fallback_nary_different_levels(self):
op = Tensor.atan2_
outplace_op = torch.atan2
B0, B1 = 2, 3
x = torch.rand(B0, 7)
y = torch.rand(7)
self._assert_uses_vmap_fallback((op, (0, None)), (x, y))
x_orig = torch.rand(B0, 7)
x = x_orig.clone()
y = torch.rand(7)
vmap(op, in_dims=(0, None))(x, y)
self.assertEqual(x, outplace_op(x_orig, y))
x_orig = torch.rand(B0, B1, 7)
x = x_orig.clone()
y = torch.rand(B0, 7)
vmap(vmap(op, in_dims=(0, None)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, 1, 7)))
msg = r'vmap: aten::atan2_\(self, \*extra_args\) is not possible'
x = torch.rand(7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(7, B0)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 1))(x, y)
x = torch.rand(B0, 7)
y = torch.rand(B0, B1, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(None, 0)))(x, y)
def test_backward_unsupported_interaction(self):
x = torch.randn(3, requires_grad=True)
y = torch.randn(5)
grad = torch.randn_like(x)
err_msg = r'backward\(\) called inside a functorch transform'
def backward_on_vmapped_tensor(x):
x.sum().backward()
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_on_vmapped_tensor)(x)
def backward_with_vmapped_grad(x, grad):
x.backward(grad)
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_with_vmapped_grad)(x, grad)
def completely_unrelated_backward(y):
x.sum().backward()
return y
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(completely_unrelated_backward)(y)
@unittest.expectedFailure
def test_grad_unsupported_interaction(self):
input_tensor = torch.randn(3, requires_grad=True)
err_msg = 'autograd.grad.* called inside torch.vmap'
captured = torch.randn(3, requires_grad=True)
def output_to_grad_is_vmapped(input_tensor):
output = (captured * input_tensor).sum()
return torch.autograd.grad([output], [captured])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(output_to_grad_is_vmapped)(input_tensor)
output = (input_tensor ** 2).sum()
def input_to_grad_is_vmapped(input_tensor):
return torch.autograd.grad([output], [input_tensor])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(input_to_grad_is_vmapped)(input_tensor)
def test_batched_gradient_basic(self):
N = 3
x = torch.randn(N, requires_grad=True)
y = torch.randn(N)
def vjp_mul(v):
return torch.autograd.grad([x * y], [x], grad_outputs=[v])[0]
batched_v = torch.eye(N)
jacobian = vmap(vjp_mul)(batched_v)
self.assertEqual(jacobian, torch.diagflat(y))
def test_functools_partial(self):
x = torch.randn(3)
y = torch.randn(2, 3)
result = vmap(functools.partial(torch.mul, x))(y)
self.assertEqual(result, x * y)
def test_nn_module(self):
tensor = torch.randn(2, 3)
model = torch.nn.Linear(3, 3, bias=False)
result = vmap(model)(tensor)
self.assertEqual(result, model(tensor))
def test_fallback_with_undefined_grad(self):
B0 = 7
x = torch.randn(2, 3, 4, 5, requires_grad=True)
weight = torch.randn(3, 3, 1, 1)
v = torch.randn(B0, 2, 3, 4, 5)
def get_vjp(v):
result = torch.nn.functional.conv2d(x, weight)
grad_x, = torch.autograd.grad(result, x, v)
return grad_x
self._assert_uses_vmap_fallback([get_vjp], [v])
def test_reshape_dim_into(self):
x = torch.randn(2, 3, 5, 7)
y = reshape_dim_into(0, 0, x)
self.assertEqual(y, x.reshape(6, 5, 7))
y = reshape_dim_into(0, 1, x)
self.assertEqual(y, x.movedim(0, 1).reshape(3, 2 * 5, 7))
y = reshape_dim_into(0, 2, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
y = reshape_dim_into(1, 2, x)
self.assertEqual(y, x.movedim(1, 2).reshape(2, 5, 3 * 7))
y = reshape_dim_into(0, -2, x)
self.assertEqual(y, x.movedim(0, 1).reshape(3, 2 * 5, 7))
y = reshape_dim_into(0, -1, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
y = reshape_dim_into(-4, -1, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
def test_reshape_dim_outof(self):
x = torch.randn(12, 12, 12).permute(2, 1, 0)
y = reshape_dim_outof(0, 2, x)
self.assertEqual(y, x.reshape(2, 6, 12, 12))
y = reshape_dim_outof(1, 4, x)
self.assertEqual(y, x.reshape(12, 4, 3, 12))
y = reshape_dim_outof(2, 6, x)
self.assertEqual(y, x.reshape(12, 12, 6, 2))
y = reshape_dim_outof(-1, 6, x)
self.assertEqual(y, x.reshape(12, 12, 6, 2))
def test_batch_rule_does_not_need_to_handle_no_batched_input(self):
def f(x, y):
res = torch.dot(y, torch.ones(2))
return x + res
x = torch.randn(7, 5)
y = torch.randn(3, 2)
out = vmap(vmap(f, in_dims=(0, None)), in_dims=(None, 0))(x, y)
expected =
|
torch
|
torch.mv(y, torch.ones(2)).view(3, 1, 1)
|
+ x
self.assertEqual(out, expected)
def _test_vmap_autocast(self, device):
|
torch
|
torch.mv.view
|
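Below is a brief standalone sketch, not a dataset field, of the expected-value expression this record targets: torch.mv with a ones vector sums each row of y, and view(3, 1, 1) reshapes the row sums so they broadcast against x, matching the expected tensor the surrounding test compares against the nested vmap output. Tensor names and shapes mirror the test above.

import torch

y = torch.randn(3, 2)
x = torch.randn(7, 5)

# torch.mv(mat, vec) is a matrix-vector product; with a ones vector it simply
# sums each row of y, producing a length-3 tensor.
row_sums = torch.mv(y, torch.ones(2))

# view(3, 1, 1) appends singleton dims so the per-row sums broadcast against
# x (shape (7, 5)), giving the (3, 7, 5) "expected" tensor from the test.
expected = row_sums.view(3, 1, 1) + x

assert expected.shape == (3, 7, 5)
assert torch.allclose(expected[1], x + row_sums[1])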
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapAPI(TestCase):
def test_non_tensor_output_raises(self):
with self.assertRaisesRegex(ValueError, "got type <class 'float'> as a return"):
vmap(lambda x: 3.14)(torch.ones(3))
def multiple_outputs(x):
return x, 3
with self.assertRaisesRegex(ValueError, "got type <class 'int'> as a return"):
vmap(multiple_outputs)(torch.ones(3))
def test_different_map_dim_size_raises(self):
x = torch.randn(2)
y = torch.randn(3)
expected_msg = 'Expected all tensors to have the same size in the mapped dimension'
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(torch.mul)(x, y)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
def test_func_with_no_inputs(self):
expected_msg = 'got no inputs'
def foo():
return torch.randn(3)
def bar(x):
return torch.randn(3)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(foo)()
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(bar)()
def test_constant_function(self):
output = vmap(lambda x: torch.tensor(3.14))(torch.ones(3))
self.assertEqual(output, torch.tensor([3.14, 3.14, 3.14]))
def test_single_input(self):
x = torch.randn(2, 3)
def square(x):
return x * x
output = vmap(square)(x)
self.assertEqual(output, x * x)
def test_multiple_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul)(x, y)
self.assertEqual(output, x * y)
def test_multiple_outputs(self):
def foo(x):
return x * x, x * x * x
x = torch.randn(3)
outputs = vmap(foo)(x)
self.assertEqual(outputs[0], x * x)
self.assertEqual(outputs[1], x * x * x)
def test_multiple_outputs2(self):
def returns_tuple_of_tensors(x):
return (x, x)
def returns_list_of_two_tensors(x):
return [x, x]
def returns_list_of_one_tensor(x):
return [x]
x = torch.randn(3)
vmap(returns_tuple_of_tensors)(x)
vmap(returns_list_of_two_tensors)(x)
vmap(returns_list_of_one_tensor)(x)
def test_nested_with_same_map_dim(self):
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
output = vmap(vmap(torch.mul))(x, y)
self.assertEqual(output, x * y)
output = vmap(vmap(vmap(torch.mul)))(x, y)
self.assertEqual(output, x * y)
def test_nested_with_diag_embed(self):
x = torch.randn(3, 3, 5)
output = vmap(vmap(torch.diag_embed))(x)
self.assertEqual(output, torch.diag_embed(x))
def test_nested_with_different_map_dim(self):
x = torch.randn(2, 3)
y = torch.randn(5, 3)
output = vmap(lambda x: vmap(lambda y: x * y)(y))(x)
self.assertEqual(output.shape, (2, 5, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
z = torch.randn(7, 3)
output = vmap(lambda x: vmap(lambda y: vmap(lambda z: x * y * z)(z))(y))(x)
self.assertEqual(output.shape, (2, 5, 7, 3))
self.assertEqual(output, x.view(2, 1, 1, 3) * y.view(5, 1, 3) * z)
def test_noop_in_inner_vmap(self):
x = torch.randn(3)
y = torch.randn(5)
output = vmap(lambda x: vmap(lambda y: x)(y))(x)
self.assertEqual(output, x.view(3, 1).expand(3, 5))
def test_unsupported_op_err_msg(self):
tensor = torch.randn(2, 3)
msg = (
r"Batching rule not implemented for aten::.+; the "
r"fallback path doesn't work on out= or view ops"
)
def out_op(x, y):
return torch.abs(x, out=y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(out_op)(tensor, tensor)
with self.assertRaisesRegex(RuntimeError, 'Batching rule not implemented'):
vmap(torch.equal)(tensor, tensor)
def test_nonzero_out_dims(self):
tensor = torch.randn(2, 3)
result = vmap(lambda x: x, out_dims=1)(tensor)
self.assertEqual(result, tensor.permute(1, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 0, 3))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=-1)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 3, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
tensor = torch.randn(2, 3, 5, 7)
other = torch.randn(2, 3, 5, 7)
result = vmap(lambda x, y: (x, y), out_dims=2)(tensor, other)
self.assertEqual(result, (tensor.permute(1, 2, 0, 3), other.permute(1, 2, 0, 3)))
ndims = 64
shape = [2] + [1] * (ndims - 1)
expected_shape = [1, 1, 2] + [1] * (ndims - 3)
tensor = torch.randn(shape)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result.shape, expected_shape)
def foo(x, y):
return x, x * y, x * y * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=1)(x, y)
self.assertEqual(
result,
(x.permute(1, 0, 2), (x * y).permute(1, 0, 2), (x * y * y).permute(1, 0, 2)))
def test_multiple_out_dims(self):
def foo(x):
return x, x
def bar(x, y):
return x, x, x, x * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=(0, 1))(x)
self.assertEqual(result, (x, x.permute(1, 0, 2)))
result = vmap(bar, out_dims=(-1, 0, 1, 2))(x, y)
expected = (
x.permute(1, 2, 0),
x,
x.permute(1, 0, 2),
(x * y).permute(1, 2, 0),
)
self.assertEqual(result, expected)
def test_nested_out_dims(self):
y = torch.randn(2, 3, 5, 7)
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y))(y)
self.assertEqual(result.shape, (2, 5, 3, 7))
self.assertEqual(result, y.permute(0, 2, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y), out_dims=1)(y)
self.assertEqual(result.shape, (5, 2, 3, 7))
self.assertEqual(result, y.permute(2, 0, 1, 3))
result = vmap(lambda y: vmap(lambda x: x, out_dims=-1)(y), out_dims=-1)(y)
self.assertEqual(result.shape, (5, 7, 3, 2))
self.assertEqual(result, y.permute(2, 3, 1, 0))
x = torch.randn(2, 3)
y = torch.randn(5, 3)
result = vmap(lambda y: vmap(lambda x: x * y, out_dims=1)(x), out_dims=-1)(y)
self.assertEqual(result.shape, (3, 2, 5))
self.assertEqual(result, (y.view(5, 1, 3) * x).permute(2, 1, 0))
def test_out_dims_edge_case(self):
def foo(x):
return x
tensor = torch.randn(2, 3)
expected = vmap(foo, out_dims=1)(tensor)
result = vmap(foo, out_dims=(1,))(tensor)
self.assertEqual(result, expected)
def test_pytree_returns(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y), [y, (y, y)]
y0, (y1, y2), (y3, (y4, y5)) = vmap(f)(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y0, y1)
self.assertEqual(y2, y1)
self.assertEqual(y2, y3)
self.assertEqual(y4, y3)
self.assertEqual(y5, y4)
def test_pytree_odict_returns(self):
x = torch.randn(2, 3)
def f(t):
y = t.sin()
return OrderedDict([("sin", y), ("cos", t.cos())])
out = vmap(f)(x)
assert isinstance(out, OrderedDict)
expected = f(x)
self.assertEqual(out["sin"], expected["sin"])
self.assertEqual(out["cos"], expected["cos"])
def test_pytest_odict_flatten_unflatten(self):
from functorch._src.vmap import _odict_flatten, _odict_unflatten
x = torch.randn(2, 3)
inpt = OrderedDict([("sin", x.sin()), ("cos", x.cos())])
out = _odict_flatten(inpt)
self.assertEqual(out[0], list(inpt.values()))
self.assertEqual(out[1], list(inpt.keys()))
recon_inpt = _odict_unflatten(*out)
self.assertEqual(recon_inpt, inpt)
def test_pytree_returns_outdims(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, (0, 1)))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, x.sin())
self.assertEqual(y2, x.sin().t())
def test_pytree_returns_broadcast_simple(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=1)(x)
self.assertEqual(y0, x.sin().t())
self.assertEqual(y1, y0)
self.assertEqual(y2, y0)
def test_pytree_returns_broadcast_nested(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, 1))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, y0.t())
self.assertEqual(y2, y0.t())
def test_out_dims_must_be_int_or_collection_of_int_err_msg(self):
msg = 'must be an int or a python collection of ints'
tensor = torch.randn(2, 3)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims='lol')(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=('lol',))(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=None)(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(None,))(tensor)
def test_out_dims_and_num_outputs_mismatch_err_msg(self):
msg = 'not compatible'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0, 0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x), out_dims=(0,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0))(x)
def test_out_dim_out_of_bounds_err_msg(self):
msg = 'Dimension out of range'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=3)(x)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=-4)(x)
def test_non_zero_in_dims(self):
tensor = torch.randn(2, 3, 5)
output = vmap(lambda x: x, (1,))(tensor)
self.assertEqual(output, tensor.permute(1, 0, 2))
self.assertEqual(output.data_ptr(), tensor.data_ptr())
x = torch.randn(2, 3)
y = torch.randn(3, 2)
output = vmap(torch.mul, (0, 1))(x, y)
self.assertEqual(output, x * y.t())
output = vmap(torch.mul, (1, 0))(x, y)
self.assertEqual(output, x.t() * y)
def test_none_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (0, None))(x, y)
self.assertEqual(output.shape, (2, 2, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
output = vmap(torch.mul, (0, None))(x, 2)
self.assertEqual(output, x * 2)
def test_nested_non_default_in_dims(self):
x = torch.rand(5, 2, 3)
y = torch.rand(3, 5, 2)
result = vmap(vmap(vmap(torch.mul), (1, 0)), (1, 2))(x, y)
self.assertEqual(result, x.permute(1, 2, 0) * y.permute(2, 0, 1))
def test_nested_negative_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (-1, -1))(x, y)
self.assertEqual(output.shape, (3, 2))
self.assertEqual(output, (x * y).permute(1, 0))
def test_non_default_in_dims_out_dims(self):
x = torch.randn(2, 3, 5)
result = vmap(lambda x: x, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x)
self.assertEqual(result.data_ptr(), x.data_ptr())
result = vmap(lambda x: x, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, x.transpose(1, 2))
self.assertEqual(result.data_ptr(), x.data_ptr())
def foo(x):
return x * 2
result = vmap(foo, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x * 2)
result = vmap(foo, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, (x * 2).transpose(1, 2))
result = vmap(vmap(foo, 1, 1), 1, 1)(x)
self.assertEqual(result, x * 2)
def test_item_throws(self):
def f(x):
return x.item()
with self.assertRaisesRegex(RuntimeError, r'item\(\) on a Tensor'):
vmap(f)(torch.randn(3))
def test_data_dependent_control_flow_throws(self):
def f(x):
if x:
return x
return 0
with self.assertRaisesRegex(RuntimeError, r'data-dependent control flow'):
vmap(f)(torch.randn(3))
def test_accepts_nested_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
out = vmap(lambda z: z[0] + z[1])((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1])([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'])({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=(0,))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out_fn = vmap(lambda z: z['x'][0] + z['x'][1][0] + z['y'][0] + z['y'][1])
out = out_fn({'x': [x, (x,)], 'y': [y, y]})
self.assertEqual(out, x + x + y + y)
def test_in_dims_wrong_type_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'expected `in_dims` to be int or a \(potentially nested\) tuple'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, [0, 0])(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, set({0, 0}))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, 'lol')(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=[0, 0])([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_not_enough_in_dims_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'in_dims is not compatible with the structure of `inputs`'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0,))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0, 0, 0))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0],))([x, y])
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))([x, y])
vmap(torch.mul, (0, 0))(x, y)
def test_integer_in_dim_but_not_tensor_input_err_msg(self):
def foo(xy):
return xy[0] * xy[1]
def bar(x, yz):
return x * yz[0] * yz[1]
x = torch.randn(2, 3)
msg = 'Got in_dim=0 for an input but the input is of type'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum)(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum, (0, 0))(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, 1])
vmap(torch.sum, (0, None))(x, 0)
def test_in_dim_not_in_tensor_err_msg(self):
def foo(x):
return x * x
x = torch.randn(2, 3)
y = torch.randn(2, 3)
msg = r'Got in_dim=-?\w for some input, but that input is a Tensor of dimensionality \w'
with self.assertRaisesRegex(ValueError, msg):
vmap(foo)(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(0,))(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(-3,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(2,))(y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([3, 0],))([x, y])
vmap(foo, in_dims=(0,))(torch.randn(2, 3))
vmap(foo, in_dims=(1,))(torch.randn(2, 3))
def test_fallback_does_not_warn_by_default(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
vmap(op)(x, y)
self.assertEqual(len(wa), 1)
@unittest.expectedFailure
def test_fallback_warns_when_warnings_are_enabled(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
with EnableVmapFallbackWarnings():
vmap(op)(x, y)
self.assertEqual(len(wa), 2)
self.assertRegex(str(wa[-1].message), FALLBACK_REGEX)
def _assert_uses_vmap_fallback(self, vmap_args, inputs):
return
def test_fallback_zero_dim(self):
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0, B1 = 0, 3
x = torch.randn(B0, 11)
y = torch.randn(11)
msg = 'The fallback path does not support vmap over dims of size 0'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
x = torch.randn(B0, B1, 11)
y = torch.randn(B1, 11)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
def test_fallback_atan2(self):
op = torch.copysign
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(op, (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
x = torch.randn(100, 10, 10, 5)
y = torch.randn(100, 10, 10)
result = vmap(vmap(vmap(op)))(x, y)
self.assertEqual(result, op(x, y.view(100, 10, 10, 1)))
@unittest.skip
def test_fallback_masked_fill(self):
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 13)
self._assert_uses_vmap_fallback((torch.index_add, (0, None, None, 0)), (x, dim, index, values))
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(
x, dim + 1, index, values.view(B0, 3, 1, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
def test_fallback_multiple_returns(self):
B0, B1, B2 = 2, 3, 1237
tensor = torch.randn(B0, 10)
self._assert_uses_vmap_fallback((torch.var_mean,), (tensor,))
result = vmap(torch.var_mean)(tensor)
expected = torch.var_mean(tensor, dim=1)
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, 10)
result = vmap(vmap(torch.var_mean))(tensor)
expected = torch.var_mean(tensor, dim=2)
self.assertEqual(result, expected)
tensor = torch.randn(B0, B1, B2, 10)
result = vmap(vmap(vmap(torch.var_mean)))(tensor)
expected = torch.var_mean(tensor, dim=3)
self.assertEqual(result, expected)
def test_inplace_fallback_unary(self):
op = Tensor.acos_
B0, B1, B2 = 2, 3, 10000
x = torch.randn(B0, 5)
self._assert_uses_vmap_fallback((op,), (x,))
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op)(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op, out_dims=(1,))(x)
self.assertTrue(result._base is x)
self.assertEqual(result, x_orig.t().acos())
x_orig = torch.randn(B0, B1, 5)
x = x_orig.clone()
result = vmap(vmap(op))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
result = vmap(vmap(vmap(op)))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
def test_inplace_fallback_nary_same_levels(self):
op = Tensor.atan2_
outplace_op = torch.atan2
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0 = 5
x_orig = torch.randn(7, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, 7, 11)
vmap(op, (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim(0, 2)))
B0, B1 = 5, 7
x_orig = torch.randn(B1, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, B1, 11)
vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim([0, 1], [2, 0])))
B0, B1, B2 = 100, 10, 10
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
y = torch.randn(B0, B1, B2)
vmap(vmap(vmap(op)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, B1, B2, 1)))
@unittest.expectedFailure
def test_inplace_fallback_nary_different_levels(self):
op = Tensor.atan2_
outplace_op = torch.atan2
B0, B1 = 2, 3
x = torch.rand(B0, 7)
y = torch.rand(7)
self._assert_uses_vmap_fallback((op, (0, None)), (x, y))
x_orig = torch.rand(B0, 7)
x = x_orig.clone()
y = torch.rand(7)
vmap(op, in_dims=(0, None))(x, y)
self.assertEqual(x, outplace_op(x_orig, y))
x_orig = torch.rand(B0, B1, 7)
x = x_orig.clone()
y = torch.rand(B0, 7)
vmap(vmap(op, in_dims=(0, None)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, 1, 7)))
msg = r'vmap: aten::atan2_\(self, \*extra_args\) is not possible'
x = torch.rand(7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(7, B0)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 1))(x, y)
x = torch.rand(B0, 7)
y = torch.rand(B0, B1, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(None, 0)))(x, y)
def test_backward_unsupported_interaction(self):
x = torch.randn(3, requires_grad=True)
y = torch.randn(5)
grad = torch.randn_like(x)
err_msg = r'backward\(\) called inside a functorch transform'
def backward_on_vmapped_tensor(x):
x.sum().backward()
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_on_vmapped_tensor)(x)
def backward_with_vmapped_grad(x, grad):
x.backward(grad)
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_with_vmapped_grad)(x, grad)
def completely_unrelated_backward(y):
x.sum().backward()
return y
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(completely_unrelated_backward)(y)
@unittest.expectedFailure
def test_grad_unsupported_interaction(self):
input_tensor = torch.randn(3, requires_grad=True)
err_msg = 'autograd.grad.* called inside torch.vmap'
captured = torch.randn(3, requires_grad=True)
def output_to_grad_is_vmapped(input_tensor):
output = (captured * input_tensor).sum()
return torch.autograd.grad([output], [captured])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(output_to_grad_is_vmapped)(input_tensor)
output = (input_tensor ** 2).sum()
def input_to_grad_is_vmapped(input_tensor):
return torch.autograd.grad([output], [input_tensor])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(input_to_grad_is_vmapped)(input_tensor)
def test_batched_gradient_basic(self):
N = 3
x = torch.randn(N, requires_grad=True)
y = torch.randn(N)
def vjp_mul(v):
return torch.autograd.grad([x * y], [x], grad_outputs=[v])[0]
batched_v = torch.eye(N)
jacobian = vmap(vjp_mul)(batched_v)
self.assertEqual(jacobian, torch.diagflat(y))
def test_functools_partial(self):
x = torch.randn(3)
y = torch.randn(2, 3)
result = vmap(functools.partial(torch.mul, x))(y)
self.assertEqual(result, x * y)
def test_nn_module(self):
tensor = torch.randn(2, 3)
model = torch.nn.Linear(3, 3, bias=False)
result = vmap(model)(tensor)
self.assertEqual(result, model(tensor))
def test_fallback_with_undefined_grad(self):
B0 = 7
x = torch.randn(2, 3, 4, 5, requires_grad=True)
weight = torch.randn(3, 3, 1, 1)
v = torch.randn(B0, 2, 3, 4, 5)
def get_vjp(v):
result = torch.nn.functional.conv2d(x, weight)
grad_x, = torch.autograd.grad(result, x, v)
return grad_x
self._assert_uses_vmap_fallback([get_vjp], [v])
def test_reshape_dim_into(self):
x = torch.randn(2, 3, 5, 7)
y = reshape_dim_into(0, 0, x)
self.assertEqual(y, x.reshape(6, 5, 7))
y = reshape_dim_into(0, 1, x)
self.assertEqual(y, x.movedim(0, 1).reshape(3, 2 * 5, 7))
y = reshape_dim_into(0, 2, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
y = reshape_dim_into(1, 2, x)
self.assertEqual(y, x.movedim(1, 2).reshape(2, 5, 3 * 7))
y = reshape_dim_into(0, -2, x)
self.assertEqual(y, x.movedim(0, 1).reshape(3, 2 * 5, 7))
y = reshape_dim_into(0, -1, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
y = reshape_dim_into(-4, -1, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
def test_reshape_dim_outof(self):
x = torch.randn(12, 12, 12).permute(2, 1, 0)
y = reshape_dim_outof(0, 2, x)
self.assertEqual(y, x.reshape(2, 6, 12, 12))
y = reshape_dim_outof(1, 4, x)
self.assertEqual(y, x.reshape(12, 4, 3, 12))
y = reshape_dim_outof(2, 6, x)
self.assertEqual(y, x.reshape(12, 12, 6, 2))
y = reshape_dim_outof(-1, 6, x)
self.assertEqual(y, x.reshape(12, 12, 6, 2))
def test_batch_rule_does_not_need_to_handle_no_batched_input(self):
def f(x, y):
res = torch.dot(y, torch.ones(2))
return x + res
x = torch.randn(7, 5)
y = torch.randn(3, 2)
out = vmap(vmap(f, in_dims=(0, None)), in_dims=(None, 0))(x, y)
expected = torch.mv(y, torch.ones(2)).view(3, 1, 1) + x
self.assertEqual(out, expected)
def _test_vmap_autocast(self, device):
if torch.device(device).type == "cpu":
amp_dtype = torch.bfloat16
else:
amp_dtype = torch.float16
a_float32 = torch.rand(4, 2, 3, device=device)
b_float32 = torch.rand(4, 3, 2, device=device)
c_float32 = torch.rand(4, 2, 2, device=device)
d_float32 = torch.rand(4, 3, 2, device=device)
def func1(x, y, z, w):
with torch.autocast(dtype=amp_dtype, device_type=device):
e_float16 =
|
torch
|
torch.matmul(x, y)
|
assert e_float16.dtype == amp_dtype, e_float16.dtype
f_float16 = torch.matmul(z, e_float16)
assert f_float16.dtype == amp_dtype, f_float16.dtype
return torch.matmul(w, f_float16.float())
|
torch
|
torch.matmul
|
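Below is a minimal standalone sketch, not a dataset field, of the autocast behaviour func1 relies on. It assumes a CPU-only setup with bfloat16 autocast, mirroring the amp_dtype choice in _test_vmap_autocast rather than prescribing it.

import torch

a = torch.rand(2, 3)
b = torch.rand(3, 2)

# Inside an autocast region, matmul on float32 inputs runs in the autocast
# dtype (bfloat16 on CPU here), which is what the assertions in func1 check.
with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
    c = torch.matmul(a, b)
assert c.dtype == torch.bfloat16

# Outside the region (and after an explicit .float()), matmul stays in float32.
d = torch.matmul(c.float(), torch.rand(2, 3))
assert d.dtype == torch.float32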
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapOperators(Namespace.TestVmapBase):
def _vmap_test(self, *args, **kwargs):
return _vmap_test(self, *args, **kwargs)
def _vmap_view_test(self, *args, **kwargs):
self._vmap_test(*args, **kwargs, check_view=True)
def _test_unary(self, op, getter, device, *args, **kwargs):
test = functools.partial(self._vmap_test, *args, **kwargs)
B0, B1 = 7, 11
test(op, [getter([B0, 3], device)])
test(op, [getter([2, 5, B0, 3], device)], in_dims=2)
test(op, [getter([2, 5, B0, 3], device)], in_dims=2, out_dims=2)
test(vmap(op), [getter([B0, B1], device)])
test(vmap(op), [getter([B1, 2, 5, B0, 3], device)], in_dims=2)
test(vmap(op, in_dims=2), [getter([2, 5, B0, B1, 3], device)],
in_dims=2, out_dims=2)
@parametrize("case", [
(torch.abs, TensorFactory.randn),
(torch.acos, TensorFactory.rand),
(torch.asin, TensorFactory.rand),
(torch.atan, TensorFactory.rand),
(torch.ceil, TensorFactory.randn),
(torch.cos, TensorFactory.rand),
(torch.cosh, TensorFactory.rand),
(torch.digamma, TensorFactory.rand),
(torch.exp, TensorFactory.randn),
(torch.expm1, TensorFactory.randn),
(torch.floor, TensorFactory.randn),
(torch.frac, TensorFactory.randn),
(torch.lgamma, TensorFactory.rand),
(torch.log, TensorFactory.randp1),
(torch.log10, TensorFactory.randp1),
(torch.log1p, TensorFactory.randp1),
(torch.log2, TensorFactory.randp1),
(torch.neg, TensorFactory.randn),
(torch.reciprocal, TensorFactory.randp1),
(torch.relu, TensorFactory.randn),
(torch.round, TensorFactory.randn),
(torch.rsqrt, TensorFactory.randp1),
(torch.sigmoid, TensorFactory.randn),
(torch.sign, TensorFactory.randn),
(torch.sin, TensorFactory.rand),
(torch.sinh, TensorFactory.rand),
(torch.sqrt, TensorFactory.rand),
(torch.tan, TensorFactory.rand),
(torch.tanh, TensorFactory.rand),
(torch.trunc, TensorFactory.randn),
], name_fn=lambda x: x[0].__name__)
def test_unary_pointwise(self, case):
op, getter = case
self._test_unary(op, getter, 'cpu')
method = getattr(Tensor, f'{op.__name__ + "_"}')
self._test_unary(method, getter, 'cpu', check_propagates_grad=False)
def test_clone(self):
self._test_unary(lambda x: x.clone(), TensorFactory.randn, 'cpu')
self._test_unary(lambda x: x.clone(memory_format=torch.preserve_format),
TensorFactory.randn, 'cpu')
self._test_unary(lambda x: x.clone(memory_format=torch.contiguous_format),
TensorFactory.randn, 'cpu')
def clone_contiguous(x):
return x.clone(memory_format=torch.contiguous_format)
B0, B1 = 3, 5
x = torch.randn(2, B0, 7)
y = vmap(clone_contiguous, in_dims=1, out_dims=1)(x)
self.assertTrue(y.movedim(1, 0).is_contiguous())
self.assertTrue(y[:, 0, :].is_contiguous())
x = torch.randn(2, B0, 7, B1)
y = vmap(vmap(clone_contiguous, in_dims=2), in_dims=1)(x)
self.assertTrue(y.is_contiguous())
self.assertTrue(y[0][0].is_contiguous())
msg = r'only supported with memory_format torch.preserve_format or torch.contiguous_format'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(lambda x: x.clone(memory_format=torch.channels_last))(torch.randn(B0))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(lambda x: x.clone(memory_format=torch.channels_last_3d))(torch.randn(B0))
def test_weird_matmul_case(self):
x = torch.randn(5, 2, 2, 2)
y = torch.randn(5, 7, 2)
vmap(vmap(torch.matmul, in_dims=(None, 0)))(x, y)
@parametrize("case",
(
(torch.clamp_min_, TensorFactory.randn),
(torch.clamp_max_, TensorFactory.randn),
), name_fn=lambda x: x[0].__name__)
def test_clamp_inplace_variant(self, case):
test = self._vmap_test
def get_number(getter):
return getter([]).item()
op, getter = case
device = 'cpu'
B0, B1 = 7, 11
test(op, (getter([B0, 3], device), getter([B0, 3], device)), check_propagates_grad=False)
test(op, (getter([B0], device), getter([B0], device)), check_propagates_grad=False)
test(op, (getter([2, B0, 3], device), getter([2, B0, 3], device)), in_dims=(1, 1), check_propagates_grad=False)
test(op, (getter([B0, 2, 3], device), getter([2, B0, 3], device)),
in_dims=(0, 1), out_dims=1, check_propagates_grad=False)
test(op, (getter([B0, 2, 3], device), getter([1, 1], device)), in_dims=(0, None), check_propagates_grad=False)
test(op, (getter([B0, 3], device), getter([B0, 3], device)), in_dims=(0, 0), check_propagates_grad=False)
test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 1, 3], device)), check_propagates_grad=False)
number = get_number(getter)
self._test_unary(lambda t: op(t, number), getter, device, check_propagates_grad=False)
@parametrize('case', [
subtest(_make_case(torch.clamp_min), name='clamp_min'),
subtest(_make_case(torch.clamp_max), name='clamp_max'),
])
def test_clamp_variant(self, case):
test = self._vmap_test
def get_number(getter):
return getter([]).item()
op, getter = case
device = 'cpu'
B0, B1 = 7, 11
test(op, (getter([B0, 3], device), getter([B0, 3], device)))
test(op, (getter([B0], device), getter([B0, 2, 3], device)))
test(op, (getter([B0], device), getter([2, B0, 3], device)), in_dims=(0, 1))
test(op, (getter([B0], device), getter([2, B0, 3], device)),
in_dims=(0, 1), out_dims=1)
test(op, (getter([B0], device), getter([2, 3], device)), in_dims=(0, None))
test(op, (getter([2, 3], device), getter([B0, 3], device)), in_dims=(None, 0))
test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 3], device)))
test(vmap(op, in_dims=(None, 0)),
(getter([B0, 2, 3], device), getter([B1, 3], device)), in_dims=(0, None))
number = get_number(getter)
self._test_unary(lambda t: op(t, number), getter, device)
def test_copy_(self):
x = torch.randn(3)
y = torch.randn(3)
vmap(Tensor.copy_)(x, y)
self.assertEqual(x, y)
x = torch.randn(3)
y = torch.randn(3, 2)
vmap(Tensor.copy_, in_dims=(1, None))(y, x)
self.assertEqual(y, x.expand(2, 3).t())
x = torch.randn(3)
y = torch.randn(2, 3)
with self.assertRaisesRegex(RuntimeError, 'inplace'):
vmap(Tensor.copy_, in_dims=(None, 0))(x, y)
@parametrize('case', [
subtest(_make_case(torch.add), name='add'),
subtest(_make_case(lambda x, y: x + y), name='add_dunder'),
subtest(_make_case(torch.sub), name='sub'),
subtest(_make_case(lambda x, y: x - y), name='sub_dunder'),
subtest(_make_case(torch.mul), name='mul'),
subtest(_make_case(lambda x, y: x * y), name='mul_dunder'),
subtest(_make_case(torch.div, input_getter=TensorFactory.randp1), name='div'),
subtest(_make_case(lambda x, y: x / y, input_getter=TensorFactory.randp1), name='div_dunder'),
subtest(_make_case(torch.pow, input_getter=TensorFactory.randp1), name='pow'),
subtest(_make_case(lambda x, y: x ** y, input_getter=TensorFactory.randp1), name='pow_dunder'),
])
def test_arithmetic(self, case):
test = self._vmap_test
def get_number(getter):
return getter([]).item()
op, getter = case
device = 'cpu'
B0, B1 = 7, 11
test(op, (getter([B0, 3], device), getter([B0, 3], device)))
test(op, (getter([B0], device), getter([B0, 2, 3], device)))
test(op, (getter([B0], device), getter([2, B0, 3], device)), in_dims=(0, 1))
test(op, (getter([B0], device), getter([2, B0, 3], device)),
in_dims=(0, 1), out_dims=1)
test(op, (getter([B0], device), getter([2, 3], device)), in_dims=(0, None))
test(op, (getter([2, 3], device), getter([B0, 3], device)), in_dims=(0, None))
test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 3], device)))
test(vmap(op, in_dims=(None, 0)),
(getter([B0, 2, 3], device), getter([B1, 3], device)), in_dims=(0, None))
number = get_number(getter)
self._test_unary(lambda t: op(t, number), getter, device)
number = get_number(getter)
self._test_unary(lambda t: op(number, t), getter, device)
test(op, (getter([B0], device), getter([B0], device, dtype=torch.double)))
test(op, (getter([B0], device, dtype=torch.double), getter([B0], device)))
test(op, (getter([B0], device), getter([B0], device)))
test(op, (getter([B0, 2], device), getter([B0], device, torch.double)))
test(op, (getter([B0], device, torch.double), getter([B0, 2], device)))
if not torch.cuda.is_available():
return
@unittest.expectedFailure
def test_as_strided(self):
def _test(sizes, strides, offset, tensor, lambd):
result = vmap(lambda t: t.as_strided(sizes, strides, offset))(tensor)
expected = vmap(lambd)(tensor)
self.assertTrue(result._base is expected._base)
self.assertEqual(result, expected)
B0 = 5
tensors = [
torch.randn(B0, 2, 3),
torch.randn(B0, 3, 2).transpose(1, 2),
torch.randn(2, B0, 2, 3)[1],
torch.randn(B0, 2, 4, 3, 7)[:, :, 0, :, 0],
torch.randn(B0, 2, 4, 3, 7)[:, :, 2, :, 1],
]
for x in tensors:
S0, S1 = x.stride()[1:]
offset = x.storage_offset()
_test([5, 5, 2, 3], [0, 0, S0, S1], offset, x, lambda x: x.expand(5, 5, 2, 3))
_test([3, 2], [S1, S0], offset, x, lambda x: x.transpose(0, 1))
_test([2], [S0], offset + S1, x, lambda x: x[:, 1])
B1 = 7
x = torch.randn(B1, B0, 2, 3)
S0, S1 = x.stride()[2:]
result = vmap(vmap(lambda t: t.as_strided([5, 5, 2, 3], [0, 0, S0, S1])), in_dims=1)(x)
expected = vmap(vmap(lambda t: t.expand(5, 5, 2, 3)), in_dims=1)(x)
self.assertTrue(result._base is expected._base)
self.assertEqual(result, expected)
with self.assertRaisesRegex(RuntimeError, 'size and stride must have the same length'):
x =
|
torch
|
torch.randn(B0, 2, 3).transpose(0, 1)
|
vmap(lambda x: x.as_strided([1, 1, 1], [1, 1]))(x)
|
torch
|
torch.randn.transpose
|
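Below is a small standalone sketch, not a dataset field, of what Tensor.transpose does in this record: it returns a view with the chosen dimensions and their strides swapped, which is how the test rearranges a tensor before calling as_strided. The shapes are illustrative; _base is the same internal attribute the test file already inspects.

import torch

x = torch.randn(5, 2, 3)
xt = x.transpose(0, 1)   # a view: dims 0 and 1 are swapped, no data is copied

assert xt.shape == (2, 5, 3)
assert xt.stride() == (3, 6, 1)   # strides are swapped along with the sizes
assert xt._base is x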
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapOperators(Namespace.TestVmapBase):
def _vmap_test(self, *args, **kwargs):
return _vmap_test(self, *args, **kwargs)
def _vmap_view_test(self, *args, **kwargs):
self._vmap_test(*args, **kwargs, check_view=True)
def _test_unary(self, op, getter, device, *args, **kwargs):
test = functools.partial(self._vmap_test, *args, **kwargs)
B0, B1 = 7, 11
test(op, [getter([B0, 3], device)])
test(op, [getter([2, 5, B0, 3], device)], in_dims=2)
test(op, [getter([2, 5, B0, 3], device)], in_dims=2, out_dims=2)
test(vmap(op), [getter([B0, B1], device)])
test(vmap(op), [getter([B1, 2, 5, B0, 3], device)], in_dims=2)
test(vmap(op, in_dims=2), [getter([2, 5, B0, B1, 3], device)],
in_dims=2, out_dims=2)
@parametrize("case", [
(torch.abs, TensorFactory.randn),
(torch.acos, TensorFactory.rand),
(torch.asin, TensorFactory.rand),
(torch.atan, TensorFactory.rand),
(torch.ceil, TensorFactory.randn),
(torch.cos, TensorFactory.rand),
(torch.cosh, TensorFactory.rand),
(torch.digamma, TensorFactory.rand),
(torch.exp, TensorFactory.randn),
(torch.expm1, TensorFactory.randn),
(torch.floor, TensorFactory.randn),
(torch.frac, TensorFactory.randn),
(torch.lgamma, TensorFactory.rand),
(torch.log, TensorFactory.randp1),
(torch.log10, TensorFactory.randp1),
(torch.log1p, TensorFactory.randp1),
(torch.log2, TensorFactory.randp1),
(torch.neg, TensorFactory.randn),
(torch.reciprocal, TensorFactory.randp1),
(torch.relu, TensorFactory.randn),
(torch.round, TensorFactory.randn),
(torch.rsqrt, TensorFactory.randp1),
(torch.sigmoid, TensorFactory.randn),
(torch.sign, TensorFactory.randn),
(torch.sin, TensorFactory.rand),
(torch.sinh, TensorFactory.rand),
(torch.sqrt, TensorFactory.rand),
(torch.tan, TensorFactory.rand),
(torch.tanh, TensorFactory.rand),
(torch.trunc, TensorFactory.randn),
], name_fn=lambda x: x[0].__name__)
def test_unary_pointwise(self, case):
op, getter = case
self._test_unary(op, getter, 'cpu')
method = getattr(Tensor, f'{op.__name__ + "_"}')
self._test_unary(method, getter, 'cpu', check_propagates_grad=False)
def test_clone(self):
self._test_unary(lambda x: x.clone(), TensorFactory.randn, 'cpu')
self._test_unary(lambda x: x.clone(memory_format=torch.preserve_format),
TensorFactory.randn, 'cpu')
self._test_unary(lambda x: x.clone(memory_format=torch.contiguous_format),
TensorFactory.randn, 'cpu')
def clone_contiguous(x):
return x.clone(memory_format=torch.contiguous_format)
B0, B1 = 3, 5
x = torch.randn(2, B0, 7)
y = vmap(clone_contiguous, in_dims=1, out_dims=1)(x)
self.assertTrue(y.movedim(1, 0).is_contiguous())
self.assertTrue(y[:, 0, :].is_contiguous())
x = torch.randn(2, B0, 7, B1)
y = vmap(vmap(clone_contiguous, in_dims=2), in_dims=1)(x)
self.assertTrue(y.is_contiguous())
self.assertTrue(y[0][0].is_contiguous())
msg = r'only supported with memory_format torch.preserve_format or torch.contiguous_format'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(lambda x: x.clone(memory_format=torch.channels_last))(torch.randn(B0))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(lambda x: x.clone(memory_format=torch.channels_last_3d))(torch.randn(B0))
def test_weird_matmul_case(self):
x = torch.randn(5, 2, 2, 2)
y = torch.randn(5, 7, 2)
vmap(vmap(torch.matmul, in_dims=(None, 0)))(x, y)
@parametrize("case",
(
(torch.clamp_min_, TensorFactory.randn),
(torch.clamp_max_, TensorFactory.randn),
), name_fn=lambda x: x[0].__name__)
def test_clamp_inplace_variant(self, case):
test = self._vmap_test
def get_number(getter):
return getter([]).item()
op, getter = case
device = 'cpu'
B0, B1 = 7, 11
test(op, (getter([B0, 3], device), getter([B0, 3], device)), check_propagates_grad=False)
test(op, (getter([B0], device), getter([B0], device)), check_propagates_grad=False)
test(op, (getter([2, B0, 3], device), getter([2, B0, 3], device)), in_dims=(1, 1), check_propagates_grad=False)
test(op, (getter([B0, 2, 3], device), getter([2, B0, 3], device)),
in_dims=(0, 1), out_dims=1, check_propagates_grad=False)
test(op, (getter([B0, 2, 3], device), getter([1, 1], device)), in_dims=(0, None), check_propagates_grad=False)
test(op, (getter([B0, 3], device), getter([B0, 3], device)), in_dims=(0, 0), check_propagates_grad=False)
test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 1, 3], device)), check_propagates_grad=False)
number = get_number(getter)
self._test_unary(lambda t: op(t, number), getter, device, check_propagates_grad=False)
@parametrize('case', [
subtest(_make_case(torch.clamp_min), name='clamp_min'),
subtest(_make_case(torch.clamp_max), name='clamp_max'),
])
def test_clamp_variant(self, case):
test = self._vmap_test
def get_number(getter):
return getter([]).item()
op, getter = case
device = 'cpu'
B0, B1 = 7, 11
test(op, (getter([B0, 3], device), getter([B0, 3], device)))
test(op, (getter([B0], device), getter([B0, 2, 3], device)))
test(op, (getter([B0], device), getter([2, B0, 3], device)), in_dims=(0, 1))
test(op, (getter([B0], device), getter([2, B0, 3], device)),
in_dims=(0, 1), out_dims=1)
test(op, (getter([B0], device), getter([2, 3], device)), in_dims=(0, None))
test(op, (getter([2, 3], device), getter([B0, 3], device)), in_dims=(None, 0))
test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 3], device)))
test(vmap(op, in_dims=(None, 0)),
(getter([B0, 2, 3], device), getter([B1, 3], device)), in_dims=(0, None))
number = get_number(getter)
self._test_unary(lambda t: op(t, number), getter, device)
def test_copy_(self):
x = torch.randn(3)
y = torch.randn(3)
vmap(Tensor.copy_)(x, y)
self.assertEqual(x, y)
x = torch.randn(3)
y = torch.randn(3, 2)
vmap(Tensor.copy_, in_dims=(1, None))(y, x)
self.assertEqual(y, x.expand(2, 3).t())
x = torch.randn(3)
y = torch.randn(2, 3)
with self.assertRaisesRegex(RuntimeError, 'inplace'):
vmap(Tensor.copy_, in_dims=(None, 0))(x, y)
@parametrize('case', [
subtest(_make_case(torch.add), name='add'),
subtest(_make_case(lambda x, y: x + y), name='add_dunder'),
subtest(_make_case(torch.sub), name='sub'),
subtest(_make_case(lambda x, y: x - y), name='sub_dunder'),
subtest(_make_case(torch.mul), name='mul'),
subtest(_make_case(lambda x, y: x * y), name='mul_dunder'),
subtest(_make_case(torch.div, input_getter=TensorFactory.randp1), name='div'),
subtest(_make_case(lambda x, y: x / y, input_getter=TensorFactory.randp1), name='div_dunder'),
subtest(_make_case(torch.pow, input_getter=TensorFactory.randp1), name='pow'),
subtest(_make_case(lambda x, y: x ** y, input_getter=TensorFactory.randp1), name='pow_dunder'),
])
def test_arithmetic(self, case):
test = self._vmap_test
def get_number(getter):
return getter([]).item()
op, getter = case
device = 'cpu'
B0, B1 = 7, 11
test(op, (getter([B0, 3], device), getter([B0, 3], device)))
test(op, (getter([B0], device), getter([B0, 2, 3], device)))
test(op, (getter([B0], device), getter([2, B0, 3], device)), in_dims=(0, 1))
test(op, (getter([B0], device), getter([2, B0, 3], device)),
in_dims=(0, 1), out_dims=1)
test(op, (getter([B0], device), getter([2, 3], device)), in_dims=(0, None))
test(op, (getter([2, 3], device), getter([B0, 3], device)), in_dims=(0, None))
test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 3], device)))
test(vmap(op, in_dims=(None, 0)),
(getter([B0, 2, 3], device), getter([B1, 3], device)), in_dims=(0, None))
number = get_number(getter)
self._test_unary(lambda t: op(t, number), getter, device)
number = get_number(getter)
self._test_unary(lambda t: op(number, t), getter, device)
test(op, (getter([B0], device), getter([B0], device, dtype=torch.double)))
test(op, (getter([B0], device, dtype=torch.double), getter([B0], device)))
test(op, (getter([B0], device), getter([B0], device)))
test(op, (getter([B0, 2], device), getter([B0], device, torch.double)))
test(op, (getter([B0], device, torch.double), getter([B0, 2], device)))
if not torch.cuda.is_available():
return
@unittest.expectedFailure
def test_as_strided(self):
def _test(sizes, strides, offset, tensor, lambd):
result = vmap(lambda t: t.as_strided(sizes, strides, offset))(tensor)
expected = vmap(lambd)(tensor)
self.assertTrue(result._base is expected._base)
self.assertEqual(result, expected)
B0 = 5
tensors = [
torch.randn(B0, 2, 3),
torch.randn(B0, 3, 2).transpose(1, 2),
torch.randn(2, B0, 2, 3)[1],
torch.randn(B0, 2, 4, 3, 7)[:, :, 0, :, 0],
torch.randn(B0, 2, 4, 3, 7)[:, :, 2, :, 1],
]
for x in tensors:
S0, S1 = x.stride()[1:]
offset = x.storage_offset()
_test([5, 5, 2, 3], [0, 0, S0, S1], offset, x, lambda x: x.expand(5, 5, 2, 3))
_test([3, 2], [S1, S0], offset, x, lambda x: x.transpose(0, 1))
_test([2], [S0], offset + S1, x, lambda x: x[:, 1])
B1 = 7
x = torch.randn(B1, B0, 2, 3)
S0, S1 = x.stride()[2:]
result = vmap(vmap(lambda t: t.as_strided([5, 5, 2, 3], [0, 0, S0, S1])), in_dims=1)(x)
expected = vmap(vmap(lambda t: t.expand(5, 5, 2, 3)), in_dims=1)(x)
self.assertTrue(result._base is expected._base)
self.assertEqual(result, expected)
with self.assertRaisesRegex(RuntimeError, 'size and stride must have the same length'):
x = torch.randn(B0, 2, 3).transpose(0, 1)
vmap(lambda x: x.as_strided([1, 1, 1], [1, 1]))(x)
msg = 'batch dims being vmapped over are at the front of the tensor'
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(2, B0, 3).transpose(0, 1)
vmap(lambda x: x.as_strided([2, 3], [B0 * 3, 1]))(x)
with self.assertRaisesRegex(RuntimeError, msg):
x =
|
torch
|
torch.randn(B0, 2, 3, B1).movedim(3, 1)
|
vmap(vmap(lambda x: x.as_strided([2, 3], [B1 * 3, B1])))(x)
|
torch
|
torch.randn.movedim
|
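Below is a short standalone sketch, not a dataset field, of the Tensor.movedim call in this record. The batch sizes B0 and B1 are illustrative; the point is that movedim(3, 1) produces a view whose moved axis keeps its original stride, giving the non-contiguous layout the error check above exercises.

import torch

B0, B1 = 5, 7
# movedim(3, 1) moves the last axis into position 1; the remaining axes keep
# their relative order and their original strides.
x = torch.randn(B0, 2, 3, B1).movedim(3, 1)

assert x.shape == (B0, B1, 2, 3)
# The moved axis keeps stride 1, so the result is not contiguous.
assert x.stride() == (2 * 3 * B1, 1, 3 * B1, B1)
assert not x.is_contiguous()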
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapBatchedGradient(Namespace.TestVmapBase):
def _vmap_test(self, *args, **kwargs):
return _vmap_test(self, *args, **kwargs)
def _batched_grad_test(self, op, args, kwargs=None, output_process_fn=lambda x: x, batch_size=3):
if kwargs is None:
kwargs = {}
outputs = op(*args, **kwargs)
outputs = differentiable(output_process_fn(outputs))
batched_vectors = tuple(construct_v(out, batch_size) for out in outputs)
def vector_jacobian_product(*vectors):
return torch.autograd.grad(outputs, differentiable(args), vectors,
retain_graph=True)
self._vmap_test(vector_jacobian_product, batched_vectors,
check_propagates_grad=False)
def _batched_grad_grad_test(self, op, args, kwargs=None, output_process_fn=lambda x: x, batch_size=3):
if kwargs is None:
kwargs = {}
outputs = op(*args, **kwargs)
outputs = differentiable(output_process_fn(outputs))
ones = tuple(torch.ones_like(out) for out in outputs)
first_grads = torch.autograd.grad(outputs, differentiable(args), ones,
create_graph=True)
first_grads = differentiable(first_grads)
self.assertNotEqual(
len(first_grads), 0, "None of the first grads depend on the input!")
batched_vectors = tuple(construct_v(grad, batch_size) for grad in first_grads)
def vector_hessian_product(*vectors):
outputs = torch.autograd.grad(first_grads, differentiable(args), vectors,
retain_graph=True, allow_unused=True)
outputs = tuple(out for out in outputs if out is not None)
assert len(outputs) > 0
return outputs
self._vmap_test(vector_hessian_product, batched_vectors,
check_propagates_grad=False)
def _test_arithmetic(self, op, device, test_grad_grad=True):
x = torch.randn(2, 3, requires_grad=True, device=device)
y = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
scalar = 3.14
self._batched_grad_test(op, (x, y))
self._batched_grad_test(op, (scalar, y))
self._batched_grad_test(op, (x, scalar))
if test_grad_grad:
self._batched_grad_grad_test(op, (x, y))
def test_add(self, device):
self._test_arithmetic(torch.add, device, test_grad_grad=False)
self._test_arithmetic(lambda x, y: x + y, device, test_grad_grad=False)
def test_sub(self, device):
self._test_arithmetic(torch.sub, device, test_grad_grad=False)
self._test_arithmetic(lambda x, y: x - y, device, test_grad_grad=False)
def test_mul(self, device):
self._test_arithmetic(torch.mul, device)
self._test_arithmetic(lambda x, y: x * y, device)
def test_div(self, device):
self._test_arithmetic(torch.div, device)
self._test_arithmetic(lambda x, y: x / y, device)
def test_binary_cross_entropy(self, device):
x =
|
F
|
F.sigmoid(torch.randn(3, 2, device=device, requires_grad=True))
|
target = torch.rand(3, 2, device=device)
op = functools.partial(F.binary_cross_entropy, target=target)
|
torch
|
torch.nn.functional.sigmoid
|
torch_direct_api
|
v_1_10_0
|
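The row above targets torch.nn.functional.sigmoid, whose output feeds binary_cross_entropy in the right context. A minimal sketch of the same pattern, with shapes chosen here for illustration:

import torch
import torch.nn.functional as F

x = torch.randn(3, 2, requires_grad=True)
p = torch.sigmoid(x)                     # element-wise 1 / (1 + exp(-x)), values in (0, 1)
assert torch.allclose(p, F.sigmoid(x))   # F.sigmoid is an alias, deprecated in favor of torch.sigmoid
assert ((p > 0) & (p < 1)).all()

target = torch.rand(3, 2)
loss = F.binary_cross_entropy(p, target)  # expects probabilities, so it pairs naturally with sigmoid
loss.backward()                           # gradients flow back through the sigmoid
assert x.grad is not None and x.grad.shape == x.shape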
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapOperatorsOpInfo(TestCase):
vmap_fail = {
xfail('fill_'),
xfail('resize_'),
xfail('resize_as_'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
xfail('view_as_complex'),
xfail('masked_select'),
xfail('svd', device_type='cuda'),
xfail('linalg.svd', device_type='cuda'),
xfail('matrix_exp'),
xfail('lu_unpack'),
xfail('histogramdd'),
xfail('nn.functional.embedding', ''),
xfail('randn_like'),
xfail('allclose'),
xfail('bfloat16', 'channels_last'),
xfail('byte', 'channels_last'),
xfail('char', 'channels_last'),
xfail('double', 'channels_last'),
xfail('float', 'channels_last'),
xfail('half', 'channels_last'),
xfail('int', 'channels_last'),
xfail('long', 'channels_last'),
xfail('short', 'channels_last'),
xfail('bool', 'channels_last'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('rand_like'),
xfail('randint_like'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('as_strided'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('nn.functional.embedding_bag'),
xfail('nonzero'),
xfail('nn.functional.glu'),
xfail('nn.functional.rrelu'), # random?
}
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', (
tol1('linalg.det',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', vmap_fail)
def test_vmap_exhaustive(self, device, dtype, op):
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
try:
generator = get_fallback_and_vmap_exhaustive(op.op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in generator:
if op.name == 'empty_like' or op.name == 'new_empty':
self.assertEqual(loop_out.shape, batched_out.shape)
continue
self.assertEqual(loop_out, batched_out)
for a_op in op.aliases:
a_generator = get_fallback_and_vmap_exhaustive(a_op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in a_generator:
self.assertEqual(loop_out, batched_out)
except Exception as e:
if "dynamic" in e.args[0]:
continue
raise e
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', (
tol1('linalg.det',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', vmap_fail.union({
xfail('complex'),
xfail('copysign'),
xfail('eig'),
xfail('fill_'),
xfail('histogram'),
xfail('index_fill'),
xfail('index_put', ''),
xfail('isin'),
xfail('linalg.cholesky'),
xfail('linalg.eigvals'),
xfail('linalg.eigvalsh'),
xfail('linalg.householder_product'),
xfail('linalg.inv'),
xfail('linalg.lstsq'),
xfail('linalg.matrix_norm'),
xfail('linalg.matrix_power'),
xfail('linalg.matrix_rank'),
xfail('linalg.matrix_rank', 'hermitian'),
xfail('linalg.pinv'),
xfail('linalg.pinv', 'hermitian'),
xfail('linalg.norm'),
xfail('linalg.solve'),
xfail('linalg.tensorinv'),
xfail('lu_solve'),
xfail('lu_unpack'),
xfail('masked_fill'),
xfail('masked_scatter'),
xfail('masked_select'),
xfail('nanquantile'),
xfail('norm', 'nuc'),
xfail('ormqr'),
xfail('put'),
xfail('quantile'),
xfail('renorm'),
xfail('resize_as_'),
xfail('take'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('vdot'),
xfail('__getitem__', ''),
xfail('all'),
xfail('any'),
xfail('count_nonzero'),
xfail('nanmean'),
xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
xfail('resize_'),
xfail('view_as_complex'),
xfail('matrix_exp'),
xfail('bucketize'),
xfail('fft.ihfft2'),
xfail('fft.ihfftn'),
xfail('allclose'),
xfail('argwhere'),
xfail('bfloat16', 'channels_last'),
xfail('byte', 'channels_last'),
xfail('char', 'channels_last'),
xfail('double', 'channels_last'),
xfail('float', 'channels_last'),
xfail('half', 'channels_last'),
xfail('int', 'channels_last'),
xfail('bool', 'channels_last'),
xfail('linalg.cross'),
xfail('long', 'channels_last'),
xfail('searchsorted'),
xfail('short', 'channels_last'),
xfail('unique_consecutive'),
xfail('unique'),
xfail('nn.functional.ctc_loss'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('nn.functional.poisson_nll_loss'),
xfail('nn.functional.huber_loss'),
xfail('nn.functional.max_pool1d'),
xfail('nn.functional.max_pool3d'),
xfail('histc'),
xfail('as_strided'),
xfail('istft'),
xfail('nonzero'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('stft'),
xfail('linalg.solve_triangular'),
xfail('nn.functional.glu'),
xfail('nn.functional.prelu'),
xfail('isclose'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('nn.functional.bilinear'),
xfail('nn.functional.embedding_bag'),
xfail('linalg.tensorsolve'),
}))
def test_op_has_batch_rule(self, device, dtype, op):
def test():
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
generator = get_fallback_and_vmap_exhaustive(op.op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in generator:
if op.name == 'empty_like' or op.name == 'new_empty':
self.assertEqual(loop_out.shape, batched_out.shape)
continue
self.assertEqual(loop_out, batched_out)
for a_op in op.aliases:
a_generator = get_fallback_and_vmap_exhaustive(a_op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in a_generator:
self.assertEqual(loop_out, batched_out)
check_vmap_fallback(self, test, op)
def test_conv_double_backward(self, device):
images = torch.randn(2, 1, 5, 5, device=device)
weight = torch.randn(2, 1, 2, 2, device=device)
bias = torch.randn(2, device=device)
ggI = torch.randn_like(images)
ggW = torch.randn_like(weight)
ggb = torch.randn_like(bias)
stride = (1, 1)
padding = (0, 0)
dilation = (1, 1)
transposed = False
output_padding = (0, 0)
groups = 1
output_mask = (True, True, True)
gO = torch.randn_like(F.conv2d(images, weight, bias, stride, padding, dilation, groups))
args = (
ggI, ggW, ggb, gO, weight, images, stride, padding, dilation,
transposed, output_padding, groups, output_mask,
)
op = torch.ops.aten._convolution_double_backward
generator = get_fallback_and_vmap_exhaustive(op, args, {})
def test():
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out, atol=1e-4, rtol=1e-4)
check_vmap_fallback(self, test, op)
def test_isnan(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isnan
x = torch.randn(B, N, C, H, W)
x[x > 0] = float('nan')
test(self, op, (x,), in_dims=(0))
def test_isinf(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isinf
x = torch.randn(B, N, C, H, W)
x[x > 0] = float('inf')
test(self, op, (x,), in_dims=(0))
def test_foo_like(self, device):
B, N, C, H, W = 2, 3, 24, 5, 7
for op in [torch.ones_like, torch.zeros_like]:
x = torch.randn(B, N, C, H, W)
vmap(op, in_dims=(0,))(x,)
def test_flatten(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
op = torch.flatten
x = torch.randn(2, 3, 4, 5)
test(self, op, (x, 1, 2), in_dims=(0, None, None))
def test_group_norm(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = F.group_norm
x = torch.randn(B, N, C, H, W)
weight = torch.randn(C)
bias = torch.randn(C)
test(self, op, (x, 3, weight, bias), in_dims=(0, None, None, None))
x = torch.randn(B, N, C, H, W)
weight = torch.randn(B, C)
bias = torch.randn(B, C)
test(self, op, (x, 4, weight, bias), in_dims=(0, None, 0, 0))
def test_index_put(self, device):
def test(f, t, idx, values):
base = f(t[0], idx[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0, 0))(t, idx, values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, None, None))(t, idx[0], values[0])[0], base)
self.assertEqual(vmap(f, in_dims=(0, None, 0))(t, idx[0], values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, 0, None))(t, idx, values[0])[0], base)
def f(x, y, z):
x[y] = z
return x
x = torch.randn(3, 4, 5, device=device)
y =
|
torch
|
torch.zeros((3, 2), device=device).long()
|
z = torch.randn(3, 2, 5, device=device)
test(f, x, y, z)
|
torch
|
torch.zeros.long
|
torch_direct_api
|
v_1_10_0
|
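The row above targets the zeros(...).long() chain used to build an integer index tensor. A minimal sketch, with the same small shape:

import torch

idx = torch.zeros((3, 2)).long()          # float32 zeros, then cast to int64
assert idx.dtype == torch.int64
# Equivalent without the intermediate float tensor:
idx2 = torch.zeros((3, 2), dtype=torch.long)
assert torch.equal(idx, idx2)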
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapOperatorsOpInfo(TestCase):
vmap_fail = {
xfail('fill_'),
xfail('resize_'),
xfail('resize_as_'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
xfail('view_as_complex'),
xfail('masked_select'),
xfail('svd', device_type='cuda'),
xfail('linalg.svd', device_type='cuda'),
xfail('matrix_exp'),
xfail('lu_unpack'),
xfail('histogramdd'),
xfail('nn.functional.embedding', ''),
xfail('randn_like'),
xfail('allclose'),
xfail('bfloat16', 'channels_last'),
xfail('byte', 'channels_last'),
xfail('char', 'channels_last'),
xfail('double', 'channels_last'),
xfail('float', 'channels_last'),
xfail('half', 'channels_last'),
xfail('int', 'channels_last'),
xfail('long', 'channels_last'),
xfail('short', 'channels_last'),
xfail('bool', 'channels_last'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('rand_like'),
xfail('randint_like'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('as_strided'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('nn.functional.embedding_bag'),
xfail('nonzero'),
xfail('nn.functional.glu'),
xfail('nn.functional.rrelu'), # random?
}
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', (
tol1('linalg.det',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', vmap_fail)
def test_vmap_exhaustive(self, device, dtype, op):
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
try:
generator = get_fallback_and_vmap_exhaustive(op.op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in generator:
if op.name == 'empty_like' or op.name == 'new_empty':
self.assertEqual(loop_out.shape, batched_out.shape)
continue
self.assertEqual(loop_out, batched_out)
for a_op in op.aliases:
a_generator = get_fallback_and_vmap_exhaustive(a_op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in a_generator:
self.assertEqual(loop_out, batched_out)
except Exception as e:
if "dynamic" in e.args[0]:
continue
raise e
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', (
tol1('linalg.det',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', vmap_fail.union({
xfail('complex'),
xfail('copysign'),
xfail('eig'),
xfail('fill_'),
xfail('histogram'),
xfail('index_fill'),
xfail('index_put', ''),
xfail('isin'),
xfail('linalg.cholesky'),
xfail('linalg.eigvals'),
xfail('linalg.eigvalsh'),
xfail('linalg.householder_product'),
xfail('linalg.inv'),
xfail('linalg.lstsq'),
xfail('linalg.matrix_norm'),
xfail('linalg.matrix_power'),
xfail('linalg.matrix_rank'),
xfail('linalg.matrix_rank', 'hermitian'),
xfail('linalg.pinv'),
xfail('linalg.pinv', 'hermitian'),
xfail('linalg.norm'),
xfail('linalg.solve'),
xfail('linalg.tensorinv'),
xfail('lu_solve'),
xfail('lu_unpack'),
xfail('masked_fill'),
xfail('masked_scatter'),
xfail('masked_select'),
xfail('nanquantile'),
xfail('norm', 'nuc'),
xfail('ormqr'),
xfail('put'),
xfail('quantile'),
xfail('renorm'),
xfail('resize_as_'),
xfail('take'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('vdot'),
xfail('__getitem__', ''),
xfail('all'),
xfail('any'),
xfail('count_nonzero'),
xfail('nanmean'),
xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
xfail('resize_'),
xfail('view_as_complex'),
xfail('matrix_exp'),
xfail('bucketize'),
xfail('fft.ihfft2'),
xfail('fft.ihfftn'),
xfail('allclose'),
xfail('argwhere'),
xfail('bfloat16', 'channels_last'),
xfail('byte', 'channels_last'),
xfail('char', 'channels_last'),
xfail('double', 'channels_last'),
xfail('float', 'channels_last'),
xfail('half', 'channels_last'),
xfail('int', 'channels_last'),
xfail('bool', 'channels_last'),
xfail('linalg.cross'),
xfail('long', 'channels_last'),
xfail('searchsorted'),
xfail('short', 'channels_last'),
xfail('unique_consecutive'),
xfail('unique'),
xfail('nn.functional.ctc_loss'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('nn.functional.poisson_nll_loss'),
xfail('nn.functional.huber_loss'),
xfail('nn.functional.max_pool1d'),
xfail('nn.functional.max_pool3d'),
xfail('histc'),
xfail('as_strided'),
xfail('istft'),
xfail('nonzero'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('stft'),
xfail('linalg.solve_triangular'),
xfail('nn.functional.glu'),
xfail('nn.functional.prelu'),
xfail('isclose'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('nn.functional.bilinear'),
xfail('nn.functional.embedding_bag'),
xfail('linalg.tensorsolve'),
}))
def test_op_has_batch_rule(self, device, dtype, op):
def test():
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
generator = get_fallback_and_vmap_exhaustive(op.op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in generator:
if op.name == 'empty_like' or op.name == 'new_empty':
self.assertEqual(loop_out.shape, batched_out.shape)
continue
self.assertEqual(loop_out, batched_out)
for a_op in op.aliases:
a_generator = get_fallback_and_vmap_exhaustive(a_op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in a_generator:
self.assertEqual(loop_out, batched_out)
check_vmap_fallback(self, test, op)
def test_conv_double_backward(self, device):
images = torch.randn(2, 1, 5, 5, device=device)
weight = torch.randn(2, 1, 2, 2, device=device)
bias = torch.randn(2, device=device)
ggI = torch.randn_like(images)
ggW = torch.randn_like(weight)
ggb = torch.randn_like(bias)
stride = (1, 1)
padding = (0, 0)
dilation = (1, 1)
transposed = False
output_padding = (0, 0)
groups = 1
output_mask = (True, True, True)
gO = torch.randn_like(F.conv2d(images, weight, bias, stride, padding, dilation, groups))
args = (
ggI, ggW, ggb, gO, weight, images, stride, padding, dilation,
transposed, output_padding, groups, output_mask,
)
op = torch.ops.aten._convolution_double_backward
generator = get_fallback_and_vmap_exhaustive(op, args, {})
def test():
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out, atol=1e-4, rtol=1e-4)
check_vmap_fallback(self, test, op)
def test_isnan(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isnan
x = torch.randn(B, N, C, H, W)
x[x > 0] = float('nan')
test(self, op, (x,), in_dims=(0))
def test_isinf(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isinf
x = torch.randn(B, N, C, H, W)
x[x > 0] = float('inf')
test(self, op, (x,), in_dims=(0))
def test_foo_like(self, device):
B, N, C, H, W = 2, 3, 24, 5, 7
for op in [torch.ones_like, torch.zeros_like]:
x = torch.randn(B, N, C, H, W)
vmap(op, in_dims=(0,))(x,)
def test_flatten(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
op = torch.flatten
x = torch.randn(2, 3, 4, 5)
test(self, op, (x, 1, 2), in_dims=(0, None, None))
def test_group_norm(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = F.group_norm
x = torch.randn(B, N, C, H, W)
weight = torch.randn(C)
bias = torch.randn(C)
test(self, op, (x, 3, weight, bias), in_dims=(0, None, None, None))
x = torch.randn(B, N, C, H, W)
weight = torch.randn(B, C)
bias = torch.randn(B, C)
test(self, op, (x, 4, weight, bias), in_dims=(0, None, 0, 0))
def test_index_put(self, device):
def test(f, t, idx, values):
base = f(t[0], idx[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0, 0))(t, idx, values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, None, None))(t, idx[0], values[0])[0], base)
self.assertEqual(vmap(f, in_dims=(0, None, 0))(t, idx[0], values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, 0, None))(t, idx, values[0])[0], base)
def f(x, y, z):
x[y] = z
return x
x = torch.randn(3, 4, 5, device=device)
y = torch.zeros((3, 2), device=device).long()
z = torch.randn(3, 2, 5, device=device)
test(f, x, y, z)
def f(t, idx, values):
t[:, idx] = values
return t
t = torch.zeros((3, 2, 3))
values = torch.ones((3, 1, 2))
idx =
|
torch
|
torch.tensor([[1, 2]]).expand((3, 2))
|
test(f, t, idx, values)
def f(t, idx, values):
|
torch
|
torch.tensor.expand
|
torch_direct_api
|
v_1_10_0
|
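The row above targets Tensor.expand on a tensor built with torch.tensor. A minimal sketch of the broadcast-without-copy behaviour, with values chosen here for illustration:

import torch

idx = torch.tensor([[1, 2]])          # shape (1, 2)
expanded = idx.expand((3, 2))         # broadcast the size-1 dim to 3; no copy is made
assert expanded.shape == (3, 2)
assert expanded.stride(0) == 0        # the expanded dimension has stride 0
assert expanded.data_ptr() == idx.data_ptr()
# expand only works on singleton dimensions; use repeat() when a real copy is needed.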
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapOperatorsOpInfo(TestCase):
vmap_fail = {
xfail('fill_'),
xfail('resize_'),
xfail('resize_as_'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
xfail('view_as_complex'),
xfail('masked_select'),
xfail('svd', device_type='cuda'),
xfail('linalg.svd', device_type='cuda'),
xfail('matrix_exp'),
xfail('lu_unpack'),
xfail('histogramdd'),
xfail('nn.functional.embedding', ''),
xfail('randn_like'),
xfail('allclose'),
xfail('bfloat16', 'channels_last'),
xfail('byte', 'channels_last'),
xfail('char', 'channels_last'),
xfail('double', 'channels_last'),
xfail('float', 'channels_last'),
xfail('half', 'channels_last'),
xfail('int', 'channels_last'),
xfail('long', 'channels_last'),
xfail('short', 'channels_last'),
xfail('bool', 'channels_last'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('rand_like'),
xfail('randint_like'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('as_strided'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('nn.functional.embedding_bag'),
xfail('nonzero'),
xfail('nn.functional.glu'),
xfail('nn.functional.rrelu'), # random?
}
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', (
tol1('linalg.det',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', vmap_fail)
def test_vmap_exhaustive(self, device, dtype, op):
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
try:
generator = get_fallback_and_vmap_exhaustive(op.op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in generator:
if op.name == 'empty_like' or op.name == 'new_empty':
self.assertEqual(loop_out.shape, batched_out.shape)
continue
self.assertEqual(loop_out, batched_out)
for a_op in op.aliases:
a_generator = get_fallback_and_vmap_exhaustive(a_op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in a_generator:
self.assertEqual(loop_out, batched_out)
except Exception as e:
if "dynamic" in e.args[0]:
continue
raise e
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', (
tol1('linalg.det',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', vmap_fail.union({
xfail('complex'),
xfail('copysign'),
xfail('eig'),
xfail('fill_'),
xfail('histogram'),
xfail('index_fill'),
xfail('index_put', ''),
xfail('isin'),
xfail('linalg.cholesky'),
xfail('linalg.eigvals'),
xfail('linalg.eigvalsh'),
xfail('linalg.householder_product'),
xfail('linalg.inv'),
xfail('linalg.lstsq'),
xfail('linalg.matrix_norm'),
xfail('linalg.matrix_power'),
xfail('linalg.matrix_rank'),
xfail('linalg.matrix_rank', 'hermitian'),
xfail('linalg.pinv'),
xfail('linalg.pinv', 'hermitian'),
xfail('linalg.norm'),
xfail('linalg.solve'),
xfail('linalg.tensorinv'),
xfail('lu_solve'),
xfail('lu_unpack'),
xfail('masked_fill'),
xfail('masked_scatter'),
xfail('masked_select'),
xfail('nanquantile'),
xfail('norm', 'nuc'),
xfail('ormqr'),
xfail('put'),
xfail('quantile'),
xfail('renorm'),
xfail('resize_as_'),
xfail('take'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('vdot'),
xfail('__getitem__', ''),
xfail('all'),
xfail('any'),
xfail('count_nonzero'),
xfail('nanmean'),
xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
xfail('resize_'),
xfail('view_as_complex'),
xfail('matrix_exp'),
xfail('bucketize'),
xfail('fft.ihfft2'),
xfail('fft.ihfftn'),
xfail('allclose'),
xfail('argwhere'),
xfail('bfloat16', 'channels_last'),
xfail('byte', 'channels_last'),
xfail('char', 'channels_last'),
xfail('double', 'channels_last'),
xfail('float', 'channels_last'),
xfail('half', 'channels_last'),
xfail('int', 'channels_last'),
xfail('bool', 'channels_last'),
xfail('linalg.cross'),
xfail('long', 'channels_last'),
xfail('searchsorted'),
xfail('short', 'channels_last'),
xfail('unique_consecutive'),
xfail('unique'),
xfail('nn.functional.ctc_loss'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('nn.functional.poisson_nll_loss'),
xfail('nn.functional.huber_loss'),
xfail('nn.functional.max_pool1d'),
xfail('nn.functional.max_pool3d'),
xfail('histc'),
xfail('as_strided'),
xfail('istft'),
xfail('nonzero'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('stft'),
xfail('linalg.solve_triangular'),
xfail('nn.functional.glu'),
xfail('nn.functional.prelu'),
xfail('isclose'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('nn.functional.bilinear'),
xfail('nn.functional.embedding_bag'),
xfail('linalg.tensorsolve'),
}))
def test_op_has_batch_rule(self, device, dtype, op):
def test():
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
generator = get_fallback_and_vmap_exhaustive(op.op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in generator:
if op.name == 'empty_like' or op.name == 'new_empty':
self.assertEqual(loop_out.shape, batched_out.shape)
continue
self.assertEqual(loop_out, batched_out)
for a_op in op.aliases:
a_generator = get_fallback_and_vmap_exhaustive(a_op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in a_generator:
self.assertEqual(loop_out, batched_out)
check_vmap_fallback(self, test, op)
def test_conv_double_backward(self, device):
images = torch.randn(2, 1, 5, 5, device=device)
weight = torch.randn(2, 1, 2, 2, device=device)
bias = torch.randn(2, device=device)
ggI = torch.randn_like(images)
ggW = torch.randn_like(weight)
ggb = torch.randn_like(bias)
stride = (1, 1)
padding = (0, 0)
dilation = (1, 1)
transposed = False
output_padding = (0, 0)
groups = 1
output_mask = (True, True, True)
gO = torch.randn_like(F.conv2d(images, weight, bias, stride, padding, dilation, groups))
args = (
ggI, ggW, ggb, gO, weight, images, stride, padding, dilation,
transposed, output_padding, groups, output_mask,
)
op = torch.ops.aten._convolution_double_backward
generator = get_fallback_and_vmap_exhaustive(op, args, {})
def test():
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out, atol=1e-4, rtol=1e-4)
check_vmap_fallback(self, test, op)
def test_isnan(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isnan
x = torch.randn(B, N, C, H, W)
x[x > 0] = float('nan')
test(self, op, (x,), in_dims=(0))
def test_isinf(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isinf
x = torch.randn(B, N, C, H, W)
x[x > 0] = float('inf')
test(self, op, (x,), in_dims=(0))
def test_foo_like(self, device):
B, N, C, H, W = 2, 3, 24, 5, 7
for op in [torch.ones_like, torch.zeros_like]:
x = torch.randn(B, N, C, H, W)
vmap(op, in_dims=(0,))(x,)
def test_flatten(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
op = torch.flatten
x = torch.randn(2, 3, 4, 5)
test(self, op, (x, 1, 2), in_dims=(0, None, None))
def test_group_norm(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = F.group_norm
x = torch.randn(B, N, C, H, W)
weight = torch.randn(C)
bias = torch.randn(C)
test(self, op, (x, 3, weight, bias), in_dims=(0, None, None, None))
x = torch.randn(B, N, C, H, W)
weight = torch.randn(B, C)
bias = torch.randn(B, C)
test(self, op, (x, 4, weight, bias), in_dims=(0, None, 0, 0))
def test_index_put(self, device):
def test(f, t, idx, values):
base = f(t[0], idx[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0, 0))(t, idx, values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, None, None))(t, idx[0], values[0])[0], base)
self.assertEqual(vmap(f, in_dims=(0, None, 0))(t, idx[0], values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, 0, None))(t, idx, values[0])[0], base)
def f(x, y, z):
x[y] = z
return x
x = torch.randn(3, 4, 5, device=device)
y = torch.zeros((3, 2), device=device).long()
z = torch.randn(3, 2, 5, device=device)
test(f, x, y, z)
def f(t, idx, values):
t[:, idx] = values
return t
t = torch.zeros((3, 2, 3))
values = torch.ones((3, 1, 2))
idx = torch.tensor([[1, 2]]).expand((3, 2))
test(f, t, idx, values)
def f(t, idx, values):
t[:, idx, :] = values
return t
t = torch.zeros((3, 2, 3, 3))
values = torch.ones((3, 1, 2, 3))
idx = torch.tensor([[0, 2]]).expand((3, 2))
test(f, t, idx, values)
def f(t, values):
t[:, :2, :] = values
return t
base = f(t[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0))(t, values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, None))(t, values[0])[0], base)
tensor = torch.zeros(3, 3, 4)
value = torch.ones(3, 2)
idxs = (torch.tensor([[0], [1], [2]]), torch.tensor([[0]]), torch.tensor([1, 2]))
expected =
|
torch
|
torch.index_put_(tensor.clone(), idxs, value)
|
def f(t, idx, v):
torch.index_put_(t, idx, v)
return t
|
torch
|
torch.index_put_
|
torch_direct_api
|
v_1_10_0
|
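The row above targets torch.index_put_, which the test compares against a vmap of the same in-place write. A minimal standalone sketch of the call, with a small tensor and indices chosen here for illustration:

import torch

t = torch.zeros(3, 4)
rows = torch.tensor([0, 2])
cols = torch.tensor([1, 3])
vals = torch.tensor([10.0, 20.0])

# In-place scatter: t[rows[i], cols[i]] = vals[i]; same as the indexing form t[rows, cols] = vals.
torch.index_put_(t, (rows, cols), vals)
assert t[0, 1].item() == 10.0 and t[2, 3].item() == 20.0

# With accumulate=True the values are added to the existing entries instead of overwriting them.
t.index_put_((rows, cols), vals, accumulate=True)
assert t[0, 1].item() == 20.0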
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapOperatorsOpInfo(TestCase):
vmap_fail = {
xfail('fill_'),
xfail('resize_'),
xfail('resize_as_'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
xfail('view_as_complex'),
xfail('masked_select'),
xfail('svd', device_type='cuda'),
xfail('linalg.svd', device_type='cuda'),
xfail('matrix_exp'),
xfail('lu_unpack'),
xfail('histogramdd'),
xfail('nn.functional.embedding', ''),
xfail('randn_like'),
xfail('allclose'),
xfail('bfloat16', 'channels_last'),
xfail('byte', 'channels_last'),
xfail('char', 'channels_last'),
xfail('double', 'channels_last'),
xfail('float', 'channels_last'),
xfail('half', 'channels_last'),
xfail('int', 'channels_last'),
xfail('long', 'channels_last'),
xfail('short', 'channels_last'),
xfail('bool', 'channels_last'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('rand_like'),
xfail('randint_like'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('as_strided'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('nn.functional.embedding_bag'),
xfail('nonzero'),
xfail('nn.functional.glu'),
xfail('nn.functional.rrelu'), # random?
}
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', (
tol1('linalg.det',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', vmap_fail)
def test_vmap_exhaustive(self, device, dtype, op):
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
try:
generator = get_fallback_and_vmap_exhaustive(op.op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in generator:
if op.name == 'empty_like' or op.name == 'new_empty':
self.assertEqual(loop_out.shape, batched_out.shape)
continue
self.assertEqual(loop_out, batched_out)
for a_op in op.aliases:
a_generator = get_fallback_and_vmap_exhaustive(a_op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in a_generator:
self.assertEqual(loop_out, batched_out)
except Exception as e:
if "dynamic" in e.args[0]:
continue
raise e
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', (
tol1('linalg.det',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', vmap_fail.union({
xfail('complex'),
xfail('copysign'),
xfail('eig'),
xfail('fill_'),
xfail('histogram'),
xfail('index_fill'),
xfail('index_put', ''),
xfail('isin'),
xfail('linalg.cholesky'),
xfail('linalg.eigvals'),
xfail('linalg.eigvalsh'),
xfail('linalg.householder_product'),
xfail('linalg.inv'),
xfail('linalg.lstsq'),
xfail('linalg.matrix_norm'),
xfail('linalg.matrix_power'),
xfail('linalg.matrix_rank'),
xfail('linalg.matrix_rank', 'hermitian'),
xfail('linalg.pinv'),
xfail('linalg.pinv', 'hermitian'),
xfail('linalg.norm'),
xfail('linalg.solve'),
xfail('linalg.tensorinv'),
xfail('lu_solve'),
xfail('lu_unpack'),
xfail('masked_fill'),
xfail('masked_scatter'),
xfail('masked_select'),
xfail('nanquantile'),
xfail('norm', 'nuc'),
xfail('ormqr'),
xfail('put'),
xfail('quantile'),
xfail('renorm'),
xfail('resize_as_'),
xfail('take'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('vdot'),
xfail('__getitem__', ''),
xfail('all'),
xfail('any'),
xfail('count_nonzero'),
xfail('nanmean'),
xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
xfail('resize_'),
xfail('view_as_complex'),
xfail('matrix_exp'),
xfail('bucketize'),
xfail('fft.ihfft2'),
xfail('fft.ihfftn'),
xfail('allclose'),
xfail('argwhere'),
xfail('bfloat16', 'channels_last'),
xfail('byte', 'channels_last'),
xfail('char', 'channels_last'),
xfail('double', 'channels_last'),
xfail('float', 'channels_last'),
xfail('half', 'channels_last'),
xfail('int', 'channels_last'),
xfail('bool', 'channels_last'),
xfail('linalg.cross'),
xfail('long', 'channels_last'),
xfail('searchsorted'),
xfail('short', 'channels_last'),
xfail('unique_consecutive'),
xfail('unique'),
xfail('nn.functional.ctc_loss'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('nn.functional.poisson_nll_loss'),
xfail('nn.functional.huber_loss'),
xfail('nn.functional.max_pool1d'),
xfail('nn.functional.max_pool3d'),
xfail('histc'),
xfail('as_strided'),
xfail('istft'),
xfail('nonzero'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('stft'),
xfail('linalg.solve_triangular'),
xfail('nn.functional.glu'),
xfail('nn.functional.prelu'),
xfail('isclose'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('nn.functional.bilinear'),
xfail('nn.functional.embedding_bag'),
xfail('linalg.tensorsolve'),
}))
def test_op_has_batch_rule(self, device, dtype, op):
def test():
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
generator = get_fallback_and_vmap_exhaustive(op.op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in generator:
if op.name == 'empty_like' or op.name == 'new_empty':
self.assertEqual(loop_out.shape, batched_out.shape)
continue
self.assertEqual(loop_out, batched_out)
for a_op in op.aliases:
a_generator = get_fallback_and_vmap_exhaustive(a_op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in a_generator:
self.assertEqual(loop_out, batched_out)
check_vmap_fallback(self, test, op)
def test_conv_double_backward(self, device):
images = torch.randn(2, 1, 5, 5, device=device)
weight = torch.randn(2, 1, 2, 2, device=device)
bias = torch.randn(2, device=device)
ggI = torch.randn_like(images)
ggW = torch.randn_like(weight)
ggb = torch.randn_like(bias)
stride = (1, 1)
padding = (0, 0)
dilation = (1, 1)
transposed = False
output_padding = (0, 0)
groups = 1
output_mask = (True, True, True)
gO = torch.randn_like(F.conv2d(images, weight, bias, stride, padding, dilation, groups))
args = (
ggI, ggW, ggb, gO, weight, images, stride, padding, dilation,
transposed, output_padding, groups, output_mask,
)
op = torch.ops.aten._convolution_double_backward
generator = get_fallback_and_vmap_exhaustive(op, args, {})
def test():
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out, atol=1e-4, rtol=1e-4)
check_vmap_fallback(self, test, op)
def test_isnan(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isnan
x = torch.randn(B, N, C, H, W)
x[x > 0] = float('nan')
test(self, op, (x,), in_dims=(0))
def test_isinf(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isinf
x = torch.randn(B, N, C, H, W)
x[x > 0] = float('inf')
test(self, op, (x,), in_dims=(0))
def test_foo_like(self, device):
B, N, C, H, W = 2, 3, 24, 5, 7
for op in [torch.ones_like, torch.zeros_like]:
x = torch.randn(B, N, C, H, W)
vmap(op, in_dims=(0,))(x,)
def test_flatten(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
op = torch.flatten
x = torch.randn(2, 3, 4, 5)
test(self, op, (x, 1, 2), in_dims=(0, None, None))
def test_group_norm(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = F.group_norm
x = torch.randn(B, N, C, H, W)
weight = torch.randn(C)
bias = torch.randn(C)
test(self, op, (x, 3, weight, bias), in_dims=(0, None, None, None))
x = torch.randn(B, N, C, H, W)
weight = torch.randn(B, C)
bias = torch.randn(B, C)
test(self, op, (x, 4, weight, bias), in_dims=(0, None, 0, 0))
def test_index_put(self, device):
def test(f, t, idx, values):
base = f(t[0], idx[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0, 0))(t, idx, values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, None, None))(t, idx[0], values[0])[0], base)
self.assertEqual(vmap(f, in_dims=(0, None, 0))(t, idx[0], values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, 0, None))(t, idx, values[0])[0], base)
def f(x, y, z):
x[y] = z
return x
x = torch.randn(3, 4, 5, device=device)
y = torch.zeros((3, 2), device=device).long()
z = torch.randn(3, 2, 5, device=device)
test(f, x, y, z)
def f(t, idx, values):
t[:, idx] = values
return t
t = torch.zeros((3, 2, 3))
values = torch.ones((3, 1, 2))
idx = torch.tensor([[1, 2]]).expand((3, 2))
test(f, t, idx, values)
def f(t, idx, values):
t[:, idx, :] = values
return t
t = torch.zeros((3, 2, 3, 3))
values = torch.ones((3, 1, 2, 3))
idx = torch.tensor([[0, 2]]).expand((3, 2))
test(f, t, idx, values)
def f(t, values):
t[:, :2, :] = values
return t
base = f(t[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0))(t, values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, None))(t, values[0])[0], base)
tensor = torch.zeros(3, 3, 4)
value = torch.ones(3, 2)
idxs = (torch.tensor([[0], [1], [2]]), torch.tensor([[0]]), torch.tensor([1, 2]))
expected = torch.index_put_(tensor.clone(), idxs, value)
def f(t, idx, v):
torch.index_put_(t, idx, v)
return t
self.assertEqual(vmap(f, in_dims=(0, (None, None), 0))(tensor, idxs[1:], value), expected)
self.assertEqual(vmap(f, in_dims=(0, (None, None), None))(tensor, idxs[1:], value[0]), expected)
@parametrize('training', [True, False])
@parametrize('track_running_stats', [True, False])
@parametrize('affine', [True, False])
def test_batch_norm(self, device, affine, track_running_stats, training):
if not track_running_stats and not training:
return
test = functools.partial(_vmap_test, check_propagates_grad=False)
BN = torch.nn.BatchNorm2d
ensemble_size = 10
hidden_dim = 3
weights, buffers, _, _, _ = \
functional_init_with_buffers(BN, [ensemble_size])(
hidden_dim, affine=affine, track_running_stats=track_running_stats)
inputs = [torch.randn(ensemble_size, 32, hidden_dim, 16, 16, device=device)]
in_dims = [0]
def append(inp, in_dim):
inputs.append(inp)
in_dims.append(in_dim)
if track_running_stats:
running_mean, running_var, _ = buffers
append(running_mean.to(device), 0)
append(running_var.to(device), 0)
else:
append(None, None)
append(None, None)
if affine:
weight, bias = weights
append(weight.to(device), 0)
append(bias.to(device), 0)
else:
append(None, None)
append(None, None)
append(training, None)
def op(inp, running_mean, running_var, weight, bias, training):
res =
|
F
|
F.batch_norm(inp, running_mean, running_var, weight, bias, training)
|
if track_running_stats:
return res, running_mean, running_var
return res
|
torch
|
torch.nn.functional.batch_norm
|
torch_direct_api
|
v_1_10_0
|
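The row above targets torch.nn.functional.batch_norm as used inside the batched test op. A minimal standalone sketch of the training and eval paths, with shapes chosen here for illustration:

import torch
import torch.nn.functional as F

x = torch.randn(8, 3, 5, 5)          # (N, C, H, W)
running_mean = torch.zeros(3)
running_var = torch.ones(3)
weight = torch.ones(3)
bias = torch.zeros(3)

# training=True: normalize with batch statistics and update the running buffers in place.
out = F.batch_norm(x, running_mean, running_var, weight, bias, training=True, momentum=0.1)
assert out.shape == x.shape
assert not torch.equal(running_mean, torch.zeros(3))   # the buffers were updated

# training=False: use the (now updated) running statistics; the buffers stay untouched.
out_eval = F.batch_norm(x, running_mean, running_var, weight, bias, training=False)
assert out_eval.shape == x.shape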
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapOperatorsOpInfo(TestCase):
vmap_fail = {
xfail('fill_'),
xfail('resize_'),
xfail('resize_as_'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
xfail('view_as_complex'),
xfail('masked_select'),
xfail('svd', device_type='cuda'),
xfail('linalg.svd', device_type='cuda'),
xfail('matrix_exp'),
xfail('lu_unpack'),
xfail('histogramdd'),
xfail('nn.functional.embedding', ''),
xfail('randn_like'),
xfail('allclose'),
xfail('bfloat16', 'channels_last'),
xfail('byte', 'channels_last'),
xfail('char', 'channels_last'),
xfail('double', 'channels_last'),
xfail('float', 'channels_last'),
xfail('half', 'channels_last'),
xfail('int', 'channels_last'),
xfail('long', 'channels_last'),
xfail('short', 'channels_last'),
xfail('bool', 'channels_last'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('rand_like'),
xfail('randint_like'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('as_strided'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('nn.functional.embedding_bag'),
xfail('nonzero'),
xfail('nn.functional.glu'),
xfail('nn.functional.rrelu'), # random?
}
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', (
tol1('linalg.det',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', vmap_fail)
def test_vmap_exhaustive(self, device, dtype, op):
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
try:
generator = get_fallback_and_vmap_exhaustive(op.op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in generator:
if op.name == 'empty_like' or op.name == 'new_empty':
self.assertEqual(loop_out.shape, batched_out.shape)
continue
self.assertEqual(loop_out, batched_out)
for a_op in op.aliases:
a_generator = get_fallback_and_vmap_exhaustive(a_op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in a_generator:
self.assertEqual(loop_out, batched_out)
except Exception as e:
if "dynamic" in e.args[0]:
continue
raise e
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', (
tol1('linalg.det',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', vmap_fail.union({
xfail('complex'),
xfail('copysign'),
xfail('eig'),
xfail('fill_'),
xfail('histogram'),
xfail('index_fill'),
xfail('index_put', ''),
xfail('isin'),
xfail('linalg.cholesky'),
xfail('linalg.eigvals'),
xfail('linalg.eigvalsh'),
xfail('linalg.householder_product'),
xfail('linalg.inv'),
xfail('linalg.lstsq'),
xfail('linalg.matrix_norm'),
xfail('linalg.matrix_power'),
xfail('linalg.matrix_rank'),
xfail('linalg.matrix_rank', 'hermitian'),
xfail('linalg.pinv'),
xfail('linalg.pinv', 'hermitian'),
xfail('linalg.norm'),
xfail('linalg.solve'),
xfail('linalg.tensorinv'),
xfail('lu_solve'),
xfail('lu_unpack'),
xfail('masked_fill'),
xfail('masked_scatter'),
xfail('masked_select'),
xfail('nanquantile'),
xfail('norm', 'nuc'),
xfail('ormqr'),
xfail('put'),
xfail('quantile'),
xfail('renorm'),
xfail('resize_as_'),
xfail('take'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('vdot'),
xfail('__getitem__', ''),
xfail('all'),
xfail('any'),
xfail('count_nonzero'),
xfail('nanmean'),
xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
xfail('resize_'),
xfail('view_as_complex'),
xfail('matrix_exp'),
xfail('bucketize'),
xfail('fft.ihfft2'),
xfail('fft.ihfftn'),
xfail('allclose'),
xfail('argwhere'),
xfail('bfloat16', 'channels_last'),
xfail('byte', 'channels_last'),
xfail('char', 'channels_last'),
xfail('double', 'channels_last'),
xfail('float', 'channels_last'),
xfail('half', 'channels_last'),
xfail('int', 'channels_last'),
xfail('bool', 'channels_last'),
xfail('linalg.cross'),
xfail('long', 'channels_last'),
xfail('searchsorted'),
xfail('short', 'channels_last'),
xfail('unique_consecutive'),
xfail('unique'),
xfail('nn.functional.ctc_loss'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('nn.functional.poisson_nll_loss'),
xfail('nn.functional.huber_loss'),
xfail('nn.functional.max_pool1d'),
xfail('nn.functional.max_pool3d'),
xfail('histc'),
xfail('as_strided'),
xfail('istft'),
xfail('nonzero'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('stft'),
xfail('linalg.solve_triangular'),
xfail('nn.functional.glu'),
xfail('nn.functional.prelu'),
xfail('isclose'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('nn.functional.bilinear'),
xfail('nn.functional.embedding_bag'),
xfail('linalg.tensorsolve'),
}))
def test_op_has_batch_rule(self, device, dtype, op):
def test():
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
generator = get_fallback_and_vmap_exhaustive(op.op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in generator:
if op.name == 'empty_like' or op.name == 'new_empty':
self.assertEqual(loop_out.shape, batched_out.shape)
continue
self.assertEqual(loop_out, batched_out)
for a_op in op.aliases:
a_generator = get_fallback_and_vmap_exhaustive(a_op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in a_generator:
self.assertEqual(loop_out, batched_out)
check_vmap_fallback(self, test, op)
def test_conv_double_backward(self, device):
images = torch.randn(2, 1, 5, 5, device=device)
weight = torch.randn(2, 1, 2, 2, device=device)
bias = torch.randn(2, device=device)
ggI = torch.randn_like(images)
ggW = torch.randn_like(weight)
ggb = torch.randn_like(bias)
stride = (1, 1)
padding = (0, 0)
dilation = (1, 1)
transposed = False
output_padding = (0, 0)
groups = 1
output_mask = (True, True, True)
gO = torch.randn_like(F.conv2d(images, weight, bias, stride, padding, dilation, groups))
args = (
ggI, ggW, ggb, gO, weight, images, stride, padding, dilation,
transposed, output_padding, groups, output_mask,
)
op = torch.ops.aten._convolution_double_backward
generator = get_fallback_and_vmap_exhaustive(op, args, {})
def test():
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out, atol=1e-4, rtol=1e-4)
check_vmap_fallback(self, test, op)
def test_isnan(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isnan
x = torch.randn(B, N, C, H, W)
x[x > 0] = float('nan')
test(self, op, (x,), in_dims=(0))
def test_isinf(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isinf
x = torch.randn(B, N, C, H, W)
x[x > 0] = float('inf')
test(self, op, (x,), in_dims=(0))
def test_foo_like(self, device):
B, N, C, H, W = 2, 3, 24, 5, 7
for op in [torch.ones_like, torch.zeros_like]:
x = torch.randn(B, N, C, H, W)
vmap(op, in_dims=(0,))(x,)
def test_flatten(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
op = torch.flatten
x = torch.randn(2, 3, 4, 5)
test(self, op, (x, 1, 2), in_dims=(0, None, None))
def test_group_norm(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = F.group_norm
x = torch.randn(B, N, C, H, W)
weight = torch.randn(C)
bias = torch.randn(C)
test(self, op, (x, 3, weight, bias), in_dims=(0, None, None, None))
x = torch.randn(B, N, C, H, W)
weight = torch.randn(B, C)
bias = torch.randn(B, C)
test(self, op, (x, 4, weight, bias), in_dims=(0, None, 0, 0))
def test_index_put(self, device):
def test(f, t, idx, values):
base = f(t[0], idx[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0, 0))(t, idx, values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, None, None))(t, idx[0], values[0])[0], base)
self.assertEqual(vmap(f, in_dims=(0, None, 0))(t, idx[0], values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, 0, None))(t, idx, values[0])[0], base)
def f(x, y, z):
x[y] = z
return x
x = torch.randn(3, 4, 5, device=device)
y = torch.zeros((3, 2), device=device).long()
z = torch.randn(3, 2, 5, device=device)
test(f, x, y, z)
def f(t, idx, values):
t[:, idx] = values
return t
t = torch.zeros((3, 2, 3))
values = torch.ones((3, 1, 2))
idx = torch.tensor([[1, 2]]).expand((3, 2))
test(f, t, idx, values)
def f(t, idx, values):
t[:, idx, :] = values
return t
t = torch.zeros((3, 2, 3, 3))
values = torch.ones((3, 1, 2, 3))
idx = torch.tensor([[0, 2]]).expand((3, 2))
test(f, t, idx, values)
def f(t, values):
t[:, :2, :] = values
return t
base = f(t[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0))(t, values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, None))(t, values[0])[0], base)
tensor = torch.zeros(3, 3, 4)
value = torch.ones(3, 2)
idxs = (torch.tensor([[0], [1], [2]]), torch.tensor([[0]]), torch.tensor([1, 2]))
expected = torch.index_put_(tensor.clone(), idxs, value)
def f(t, idx, v):
torch.index_put_(t, idx, v)
return t
self.assertEqual(vmap(f, in_dims=(0, (None, None), 0))(tensor, idxs[1:], value), expected)
self.assertEqual(vmap(f, in_dims=(0, (None, None), None))(tensor, idxs[1:], value[0]), expected)
@parametrize('training', [True, False])
@parametrize('track_running_stats', [True, False])
@parametrize('affine', [True, False])
def test_batch_norm(self, device, affine, track_running_stats, training):
if not track_running_stats and not training:
return
test = functools.partial(_vmap_test, check_propagates_grad=False)
BN = torch.nn.BatchNorm2d
ensemble_size = 10
hidden_dim = 3
weights, buffers, _, _, _ = \
functional_init_with_buffers(BN, [ensemble_size])(
hidden_dim, affine=affine, track_running_stats=track_running_stats)
inputs = [torch.randn(ensemble_size, 32, hidden_dim, 16, 16, device=device)]
in_dims = [0]
def append(inp, in_dim):
inputs.append(inp)
in_dims.append(in_dim)
if track_running_stats:
running_mean, running_var, _ = buffers
append(running_mean.to(device), 0)
append(running_var.to(device), 0)
else:
append(None, None)
append(None, None)
if affine:
weight, bias = weights
append(weight.to(device), 0)
append(bias.to(device), 0)
else:
append(None, None)
append(None, None)
append(training, None)
def op(inp, running_mean, running_var, weight, bias, training):
res = F.batch_norm(inp, running_mean, running_var, weight, bias, training)
if track_running_stats:
return res, running_mean, running_var
return res
test(self, op, tuple(inputs), in_dims=tuple(in_dims))
def test_torch_return_types_returns(self, device):
t = torch.randn(3, 2, 2, device=device)
self.assertTrue(isinstance(vmap(torch.min, (0, None))(t, 0), torch.return_types.min))
self.assertTrue(isinstance(vmap(torch.max, (0, None))(t, 0), torch.return_types.max))
self.assertTrue(isinstance(vmap(torch.topk, (0, None, None))(t, 1, 0), torch.return_types.topk))
self.assertTrue(isinstance(vmap(torch.linalg.eig, (0))(t), torch.return_types.linalg_eig))
def test_namedtuple_returns(self, device):
Point = namedtuple('Point', ['x', 'y'])
def f(x, y):
return Point(x=x, y=y)
x = torch.randn(2, 5, device=device)
y = torch.randn(2, 3, device=device)
self.assertTrue(isinstance(vmap(f)(x, y), Point))
def reset_random(self, generator, orig_state, use_generator, seed):
return generator.set_state(orig_state) if use_generator else torch.manual_seed(seed)
@parametrize('randomness', ['same', 'different', 'error'])
@parametrize('use_generator', [True, False])
def test_random_behavior(self, device, randomness, use_generator):
generator =
|
torch
|
torch.Generator(device=device)
|
orig_state = generator.get_state()
kwargs = {'device': device, 'generator': generator} if use_generator else {'device': device}
only_gen_kwarg = {'generator': generator} if use_generator else {}
supported_random_ops = [
|
torch
|
torch.Generator
|
torch_direct_api
|
v_1_10_0
|
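The row above targets torch.Generator, which the test uses to capture and restore RNG state. A minimal sketch of that pattern, with a CPU generator and values chosen here for illustration:

import torch

g = torch.Generator()        # CPU generator; torch.Generator(device='cuda') targets a GPU
g.manual_seed(0)
state = g.get_state()        # snapshot the RNG state

a = torch.randn(3, generator=g)   # sampling ops accept a generator= keyword
g.set_state(state)                # restore the snapshot ...
b = torch.randn(3, generator=g)   # ... so the same draw is reproduced
assert torch.equal(a, b)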
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
    subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
class TestVmapOperatorsOpInfo(TestCase):
vmap_fail = {
xfail('fill_'),
xfail('resize_'),
xfail('resize_as_'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
xfail('view_as_complex'),
xfail('masked_select'),
xfail('svd', device_type='cuda'),
xfail('linalg.svd', device_type='cuda'),
xfail('matrix_exp'),
xfail('lu_unpack'),
xfail('histogramdd'),
xfail('nn.functional.embedding', ''),
xfail('randn_like'),
xfail('allclose'),
xfail('bfloat16', 'channels_last'),
xfail('byte', 'channels_last'),
xfail('char', 'channels_last'),
xfail('double', 'channels_last'),
xfail('float', 'channels_last'),
xfail('half', 'channels_last'),
xfail('int', 'channels_last'),
xfail('long', 'channels_last'),
xfail('short', 'channels_last'),
xfail('bool', 'channels_last'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('rand_like'),
xfail('randint_like'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('as_strided'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('nn.functional.embedding_bag'),
xfail('nonzero'),
xfail('nn.functional.glu'),
xfail('nn.functional.rrelu'), # random?
}
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', (
tol1('linalg.det',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', vmap_fail)
def test_vmap_exhaustive(self, device, dtype, op):
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
try:
generator = get_fallback_and_vmap_exhaustive(op.op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in generator:
if op.name == 'empty_like' or op.name == 'new_empty':
self.assertEqual(loop_out.shape, batched_out.shape)
continue
self.assertEqual(loop_out, batched_out)
for a_op in op.aliases:
a_generator = get_fallback_and_vmap_exhaustive(a_op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in a_generator:
self.assertEqual(loop_out, batched_out)
except Exception as e:
if "dynamic" in e.args[0]:
continue
raise e
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', (
tol1('linalg.det',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', vmap_fail.union({
xfail('complex'),
xfail('copysign'),
xfail('eig'),
xfail('fill_'),
xfail('histogram'),
xfail('index_fill'),
xfail('index_put', ''),
xfail('isin'),
xfail('linalg.cholesky'),
xfail('linalg.eigvals'),
xfail('linalg.eigvalsh'),
xfail('linalg.householder_product'),
xfail('linalg.inv'),
xfail('linalg.lstsq'),
xfail('linalg.matrix_norm'),
xfail('linalg.matrix_power'),
xfail('linalg.matrix_rank'),
xfail('linalg.matrix_rank', 'hermitian'),
xfail('linalg.pinv'),
xfail('linalg.pinv', 'hermitian'),
xfail('linalg.norm'),
xfail('linalg.solve'),
xfail('linalg.tensorinv'),
xfail('lu_solve'),
xfail('lu_unpack'),
xfail('masked_fill'),
xfail('masked_scatter'),
xfail('masked_select'),
xfail('nanquantile'),
xfail('norm', 'nuc'),
xfail('ormqr'),
xfail('put'),
xfail('quantile'),
xfail('renorm'),
xfail('resize_as_'),
xfail('take'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('vdot'),
xfail('__getitem__', ''),
xfail('all'),
xfail('any'),
xfail('count_nonzero'),
xfail('nanmean'),
xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
xfail('resize_'),
xfail('view_as_complex'),
xfail('matrix_exp'),
xfail('bucketize'),
xfail('fft.ihfft2'),
xfail('fft.ihfftn'),
xfail('allclose'),
xfail('argwhere'),
xfail('bfloat16', 'channels_last'),
xfail('byte', 'channels_last'),
xfail('char', 'channels_last'),
xfail('double', 'channels_last'),
xfail('float', 'channels_last'),
xfail('half', 'channels_last'),
xfail('int', 'channels_last'),
xfail('bool', 'channels_last'),
xfail('linalg.cross'),
xfail('long', 'channels_last'),
xfail('searchsorted'),
xfail('short', 'channels_last'),
xfail('unique_consecutive'),
xfail('unique'),
xfail('nn.functional.ctc_loss'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('nn.functional.poisson_nll_loss'),
xfail('nn.functional.huber_loss'),
xfail('nn.functional.max_pool1d'),
xfail('nn.functional.max_pool3d'),
xfail('histc'),
xfail('as_strided'),
xfail('istft'),
xfail('nonzero'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('stft'),
xfail('linalg.solve_triangular'),
xfail('nn.functional.glu'),
xfail('nn.functional.prelu'),
xfail('isclose'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('nn.functional.bilinear'),
xfail('nn.functional.embedding_bag'),
xfail('linalg.tensorsolve'),
}))
def test_op_has_batch_rule(self, device, dtype, op):
def test():
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
generator = get_fallback_and_vmap_exhaustive(op.op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in generator:
if op.name == 'empty_like' or op.name == 'new_empty':
self.assertEqual(loop_out.shape, batched_out.shape)
continue
self.assertEqual(loop_out, batched_out)
for a_op in op.aliases:
a_generator = get_fallback_and_vmap_exhaustive(a_op, arg_values, kwarg_values, opinfo=op)
for loop_out, batched_out in a_generator:
self.assertEqual(loop_out, batched_out)
check_vmap_fallback(self, test, op)
def test_conv_double_backward(self, device):
images = torch.randn(2, 1, 5, 5, device=device)
weight = torch.randn(2, 1, 2, 2, device=device)
bias = torch.randn(2, device=device)
ggI = torch.randn_like(images)
ggW = torch.randn_like(weight)
ggb = torch.randn_like(bias)
stride = (1, 1)
padding = (0, 0)
dilation = (1, 1)
transposed = False
output_padding = (0, 0)
groups = 1
output_mask = (True, True, True)
gO = torch.randn_like(F.conv2d(images, weight, bias, stride, padding, dilation, groups))
args = (
ggI, ggW, ggb, gO, weight, images, stride, padding, dilation,
transposed, output_padding, groups, output_mask,
)
op = torch.ops.aten._convolution_double_backward
generator = get_fallback_and_vmap_exhaustive(op, args, {})
def test():
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out, atol=1e-4, rtol=1e-4)
check_vmap_fallback(self, test, op)
def test_isnan(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isnan
x = torch.randn(B, N, C, H, W)
x[x > 0] = float('nan')
test(self, op, (x,), in_dims=(0))
def test_isinf(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isinf
x = torch.randn(B, N, C, H, W)
x[x > 0] = float('inf')
test(self, op, (x,), in_dims=(0))
def test_foo_like(self, device):
B, N, C, H, W = 2, 3, 24, 5, 7
for op in [torch.ones_like, torch.zeros_like]:
x = torch.randn(B, N, C, H, W)
vmap(op, in_dims=(0,))(x,)
def test_flatten(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
op = torch.flatten
x = torch.randn(2, 3, 4, 5)
test(self, op, (x, 1, 2), in_dims=(0, None, None))
def test_group_norm(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = F.group_norm
x = torch.randn(B, N, C, H, W)
weight = torch.randn(C)
bias = torch.randn(C)
test(self, op, (x, 3, weight, bias), in_dims=(0, None, None, None))
x = torch.randn(B, N, C, H, W)
weight = torch.randn(B, C)
bias = torch.randn(B, C)
test(self, op, (x, 4, weight, bias), in_dims=(0, None, 0, 0))
def test_index_put(self, device):
def test(f, t, idx, values):
base = f(t[0], idx[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0, 0))(t, idx, values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, None, None))(t, idx[0], values[0])[0], base)
self.assertEqual(vmap(f, in_dims=(0, None, 0))(t, idx[0], values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, 0, None))(t, idx, values[0])[0], base)
def f(x, y, z):
x[y] = z
return x
x = torch.randn(3, 4, 5, device=device)
y = torch.zeros((3, 2), device=device).long()
z = torch.randn(3, 2, 5, device=device)
test(f, x, y, z)
def f(t, idx, values):
t[:, idx] = values
return t
t = torch.zeros((3, 2, 3))
values = torch.ones((3, 1, 2))
idx = torch.tensor([[1, 2]]).expand((3, 2))
test(f, t, idx, values)
def f(t, idx, values):
t[:, idx, :] = values
return t
t = torch.zeros((3, 2, 3, 3))
values = torch.ones((3, 1, 2, 3))
idx = torch.tensor([[0, 2]]).expand((3, 2))
test(f, t, idx, values)
def f(t, values):
t[:, :2, :] = values
return t
base = f(t[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0))(t, values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, None))(t, values[0])[0], base)
tensor = torch.zeros(3, 3, 4)
value = torch.ones(3, 2)
idxs = (torch.tensor([[0], [1], [2]]), torch.tensor([[0]]), torch.tensor([1, 2]))
expected = torch.index_put_(tensor.clone(), idxs, value)
def f(t, idx, v):
torch.index_put_(t, idx, v)
return t
self.assertEqual(vmap(f, in_dims=(0, (None, None), 0))(tensor, idxs[1:], value), expected)
self.assertEqual(vmap(f, in_dims=(0, (None, None), None))(tensor, idxs[1:], value[0]), expected)
@parametrize('training', [True, False])
@parametrize('track_running_stats', [True, False])
@parametrize('affine', [True, False])
def test_batch_norm(self, device, affine, track_running_stats, training):
if not track_running_stats and not training:
return
test = functools.partial(_vmap_test, check_propagates_grad=False)
BN = torch.nn.BatchNorm2d
ensemble_size = 10
hidden_dim = 3
weights, buffers, _, _, _ = \
functional_init_with_buffers(BN, [ensemble_size])(
hidden_dim, affine=affine, track_running_stats=track_running_stats)
inputs = [torch.randn(ensemble_size, 32, hidden_dim, 16, 16, device=device)]
in_dims = [0]
def append(inp, in_dim):
inputs.append(inp)
in_dims.append(in_dim)
if track_running_stats:
running_mean, running_var, _ = buffers
append(running_mean.to(device), 0)
append(running_var.to(device), 0)
else:
append(None, None)
append(None, None)
if affine:
weight, bias = weights
append(weight.to(device), 0)
append(bias.to(device), 0)
else:
append(None, None)
append(None, None)
append(training, None)
def op(inp, running_mean, running_var, weight, bias, training):
res = F.batch_norm(inp, running_mean, running_var, weight, bias, training)
if track_running_stats:
return res, running_mean, running_var
return res
test(self, op, tuple(inputs), in_dims=tuple(in_dims))
def test_torch_return_types_returns(self, device):
t = torch.randn(3, 2, 2, device=device)
self.assertTrue(isinstance(vmap(torch.min, (0, None))(t, 0), torch.return_types.min))
self.assertTrue(isinstance(vmap(torch.max, (0, None))(t, 0), torch.return_types.max))
self.assertTrue(isinstance(vmap(torch.topk, (0, None, None))(t, 1, 0), torch.return_types.topk))
self.assertTrue(isinstance(vmap(torch.linalg.eig, (0))(t), torch.return_types.linalg_eig))
def test_namedtuple_returns(self, device):
Point = namedtuple('Point', ['x', 'y'])
def f(x, y):
return Point(x=x, y=y)
x = torch.randn(2, 5, device=device)
y = torch.randn(2, 3, device=device)
self.assertTrue(isinstance(vmap(f)(x, y), Point))
def reset_random(self, generator, orig_state, use_generator, seed):
return generator.set_state(orig_state) if use_generator else torch.manual_seed(seed)
@parametrize('randomness', ['same', 'different', 'error'])
@parametrize('use_generator', [True, False])
def test_random_behavior(self, device, randomness, use_generator):
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = {'device': device, 'generator': generator} if use_generator else {'device': device}
only_gen_kwarg = {'generator': generator} if use_generator else {}
supported_random_ops = [
lambda _, shape: torch.randn(shape, **kwargs),
lambda _, shape: torch.rand(shape, **kwargs),
lambda _, shape: torch.randint(100, shape, **kwargs),
lambda _, shape: torch.randint(5, 100, shape, **kwargs),
lambda t, _: t.random_(**only_gen_kwarg),
lambda t, _: t.cauchy_(**only_gen_kwarg),
lambda t, _: t.exponential_(**only_gen_kwarg),
lambda t, _: t.geometric_(0.5, **only_gen_kwarg),
lambda t, _: t.log_normal_(**only_gen_kwarg),
lambda t, _: t.uniform_(**only_gen_kwarg),
lambda _, shape: torch.normal(0., 1., shape, **kwargs),
lambda t, _: t.normal_(**only_gen_kwarg),
lambda t, _: t.bernoulli_(torch.tensor([0.3, 0.4, 0.5, 0.6]), **only_gen_kwarg),
lambda t, _: t.bernoulli_(**only_gen_kwarg),
]
B0 = 4
seed = 1234567
passed = torch.randn(B0, device=device)
for op in supported_random_ops:
if randomness == 'error':
with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
vmap(op, in_dims=(0, None), randomness=randomness)(passed, [B0])
return
passed = torch.randn(B0, B0, B0, device=device)
generator = self.reset_random(generator, orig_state, use_generator, seed)
vmap_result = vmap(op, in_dims=(0, None), randomness=randomness)(passed, [B0, B0])
if randomness == "different":
passed = torch.randn([B0, B0, B0], device=device) # reset for in place operation
generator = self.reset_random(generator, orig_state, use_generator, seed)
expected = op(passed, [B0, B0, B0])
assert torch.allclose(vmap_result, expected)
else:
passed = torch.randn(B0, B0, device=device) # reset for in place operation
generator = self.reset_random(generator, orig_state, use_generator, seed)
expected = op(passed, [B0, B0])
for i in range(B0):
assert torch.allclose(vmap_result[i], expected)
@parametrize('randomness', ['same', 'different', 'error'])
@parametrize('use_generator', [True, False])
def test_randperm(self, device, randomness, use_generator):
B0 = 4
seed = 1234567
passed = torch.randn(B0, device=device)
torch.manual_seed(seed)
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = {'device': device, 'generator': generator} if use_generator else {'device': device}
if randomness == 'error':
with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
vmap(lambda _: torch.randperm(10, **kwargs), randomness=randomness)(passed)
return
vmap_result = vmap(lambda _: torch.randperm(10, **kwargs), randomness=randomness)(passed)
generator = generator.set_state(orig_state)
torch.manual_seed(seed)
if randomness == 'different':
for i in range(B0):
expected =
|
torch
|
torch.randperm(10, **kwargs)
|
assert torch.allclose(vmap_result[i], expected)
else:
expected = torch.randperm(10, **kwargs)
for i in range(B0):
|
torch
|
torch.randperm
|
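A small sketch of the API targeted above, separate from the test (names are illustrative): torch.randperm draws from the global RNG unless an explicit generator is passed, so reseeding either source reproduces the same permutation:

import torch

torch.manual_seed(1234567)
p1 = torch.randperm(10)
torch.manual_seed(1234567)
p2 = torch.randperm(10)
assert torch.equal(p1, p2)              # same seed, same permutation

gen = torch.Generator().manual_seed(0)
p3 = torch.randperm(10, generator=gen)  # draws from gen, not the global RNG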
torch_direct_api
|
v_1_10_0
|
import torch
from torch import fx
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
import torch
import unittest
from torch import fx
from functorch.compile import pointwise_operator
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
LLVM_ENABLED =
|
torch
|
torch._C._llvm_enabled()
|
HAS_CUDA = torch.cuda.is_available()
HAS_SYMPY = False
try:
import sympy # noqa: F401
|
torch
|
torch._C._llvm_enabled
|
torch_direct_api
|
v_1_10_0
|
import torch
from torch import fx
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
import torch
import unittest
from torch import fx
from functorch.compile import pointwise_operator
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
LLVM_ENABLED = torch._C._llvm_enabled()
HAS_CUDA =
|
torch
|
torch.cuda.is_available()
|
HAS_SYMPY = False
try:
import sympy # noqa: F401
|
torch
|
torch.cuda.is_available
|
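The two records above compute build-capability flags; a minimal sketch of that gating pattern (the test class below is hypothetical) picks a device once and skips GPU-only tests when CUDA is unavailable:

import unittest
import torch

HAS_CUDA = torch.cuda.is_available()
DEVICE = "cuda" if HAS_CUDA else "cpu"

class HypotheticalGpuTest(unittest.TestCase):
    @unittest.skipIf(not HAS_CUDA, "requires CUDA")
    def test_on_gpu(self):
        x = torch.ones(4, device=DEVICE)
        self.assertEqual(x.sum().item(), 4.0)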
torch_direct_api
|
v_1_10_0
|
import torch
from torch import fx
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
class TestOperatorAuthoring(JitTestCase):
device = None
def rand(self, *args, dtype=torch.float32, **kwargs):
return torch.randint(0, 100, args, dtype=dtype, device=self.device, **kwargs)
def check(self, *args):
result_aten = pointwise_fn(*args)
result_nnc = nnc_pointwise_fn(*args)
self.assertEqual(result_nnc.dtype, result_aten.dtype)
self.assertEqual(result_nnc.size(), result_aten.size())
self.assertEqual(result_nnc.stride(), result_aten.stride())
self.assertEqual(result_nnc.requires_grad, result_aten.requires_grad)
torch.testing.assert_allclose(result_aten, result_nnc)
def test_broadcast1(self):
self.check(self.rand(8, 16), self.rand(1))
def test_broadcast2(self):
self.check(self.rand(8, 1), self.rand(1, 8))
def test_transposed1(self):
self.check(self.rand(7, 3), self.rand(3, 7).transpose(0, 1))
def test_transposed2(self):
self.check(self.rand(8, 16).transpose(0, 1), self.rand(8, 16).transpose(0, 1))
def test_slice1(self):
self.check(self.rand(20, 20, 2)[:8, :16, 0], self.rand(8, 16))
def test_slice2(self):
self.check(self.rand(8, 16, 2)[:, :, 0], self.rand(8, 16, 2)[:, :, 0])
def test_issue57611(self):
self.check(self.rand(1, 32, 32, 2), self.rand(2, 1, 1, 2))
def test_float_double(self):
self.check(self.rand(8, 16), self.rand(8, 16, dtype=torch.float64))
def test_int_long(self):
self.check(
self.rand(8, 16, dtype=torch.int32), self.rand(1, 1, dtype=torch.int64)
)
def test_float_int(self):
self.check(
self.rand(8, 16, dtype=torch.float32), self.rand(8, 16, dtype=torch.int32)
)
@unittest.skipIf(not HAS_SYMPY, "currently requires sympy")
def test_requires_grad(self):
self.check(self.rand(4, 2), self.rand(4, 2, requires_grad=True))
@unittest.skipIf(not HAS_SYMPY, "currently requires sympy")
def test_backwards(self):
def grads(fn):
a = self.rand(4, 2, requires_grad=True)
b = self.rand(4, 2, requires_grad=True)
c = self.rand(4, 2)
d = self.rand(4, 2)
fn(fn(a, fn(b, c)), d).sum().backward()
return a.grad, b.grad
a1, b1 = grads(pointwise_fn)
a2, b2 = grads(nnc_pointwise_fn)
torch.testing.assert_allclose(a1, a2)
torch.testing.assert_allclose(b1, b2)
def test_torch_function(self):
self.check(self.rand(10), TorchFunctionExample())
def test_fx_trace(self):
def example(x):
return custom1(custom2(x))
graph =
|
fx
|
fx.symbolic_trace(example)
|
self.assertIn("custom1", graph.code)
self.assertIn("custom2", graph.code)
x = torch.randn(8, device=self.device)
torch.testing.assert_allclose(x + 3, graph(x))
|
torch
|
torch.fx.symbolic_trace
|
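As a self-contained sketch of the API targeted above (the function f is illustrative, not the custom ops from the test), fx.symbolic_trace records the operations of a plain Python function into an fx.Graph whose generated source can be inspected and executed:

import torch
from torch import fx

def f(x):
    return torch.relu(x) + 1

gm = fx.symbolic_trace(f)     # GraphModule wrapping the traced graph
print(gm.code)                # generated Python source for the graph
x = torch.randn(8)
assert torch.allclose(gm(x), f(x))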
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleCNN(nn.Module):
def __init__(self):
super(SimpleCNN, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x =
|
F
|
F.max_pool2d(x, 2)
|
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
|
torch
|
torch.nn.functional.max_pool2d
|
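A standalone sketch of the pooling step targeted above: F.max_pool2d with kernel size 2 halves each spatial dimension, which matches fc1's input size of 9216 = 64 * 12 * 12 for a 28x28 input such as MNIST (the tensor below is a stand-in for conv2's output):

import torch
import torch.nn.functional as F

x = torch.randn(1, 64, 24, 24)   # shape of conv2's output for a 28x28 image
y = F.max_pool2d(x, 2)           # kernel_size=2 halves H and W
assert y.shape == (1, 64, 12, 12)
assert y.flatten(1).shape[1] == 9216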
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.benchmark import Timer
"""
=============================
Jacobians, hessians, and more
=============================
Computing jacobians or hessians is useful in a number of non-traditional
deep learning models. It is difficult (or annoying) to compute these quantities
efficiently using a standard autodiff system like PyTorch Autograd; functorch
provides ways of computing various higher-order autodiff quantities efficiently.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
torch.manual_seed(0)
def predict(weight, bias, x):
return F.linear(x, weight, bias).tanh()
D = 16
weight = torch.randn(D, D)
bias = torch.randn(D)
x = torch.randn(D)
xp = x.clone().requires_grad_()
unit_vectors = torch.eye(D)
def compute_jac(xp):
jacobian_rows = [torch.autograd.grad(predict(weight, bias, xp), xp, vec)[0]
for vec in unit_vectors]
return torch.stack(jacobian_rows)
jacobian = compute_jac(xp)
from functorch import vmap, vjp
_, vjp_fn = vjp(partial(predict, weight, bias), x)
ft_jacobian, = vmap(vjp_fn)(unit_vectors)
assert torch.allclose(ft_jacobian, jacobian)
from functorch import jacrev
ft_jacobian = jacrev(predict, argnums=2)(weight, bias, x)
assert torch.allclose(ft_jacobian, jacobian)
from torch.utils.benchmark import Timer
without_vmap =
|
Timer
|
Timer(stmt="compute_jac(xp)", globals=globals())
|
with_vmap = Timer(stmt="jacrev(predict, argnums=2)(weight, bias, x)", globals=globals())
print(without_vmap.timeit(500))
print(with_vmap.timeit(500))
|
torch
|
torch.utils.benchmark.Timer
|
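A minimal, self-contained Timer sketch separate from the tutorial above (the stmt and variable names are illustrative): the statement is executed repeatedly against the supplied globals, and the returned Measurement summarizes wall time:

import torch
from torch.utils.benchmark import Timer

x = torch.randn(128, 128)
t = Timer(stmt="x @ x", globals={"x": x})
print(t.timeit(100))   # Measurement summarizing time over 100 runs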
torch_direct_api
|
v_1_10_0
|
import torch.fx as fx
import torch
from torch.fx.node import map_aggregate
class ConcreteProp(torch.fx.Interpreter):
def run_node(self, n):
result = super().run_node(n)
found_tensor = False
def extract_tensor_meta(obj):
if isinstance(obj, torch.Tensor):
nonlocal found_tensor
found_tensor = True
return obj
else:
return obj
from torch.fx.node import map_aggregate
concrete_value =
|
map
|
map_aggregate(result, extract_tensor_meta)
|
if found_tensor:
n.meta['concrete_value'] = concrete_value
return result
|
torch
|
torch.fx.node.map_aggregate
|
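On its own, the API targeted above applies a function to every leaf of a nested container while preserving its structure; a small sketch (the nested value is illustrative):

import torch
from torch.fx.node import map_aggregate

nested = {"a": (torch.ones(2), [torch.zeros(3)]), "b": 5}
shapes = map_aggregate(
    nested,
    lambda v: tuple(v.shape) if isinstance(v, torch.Tensor) else v,
)
# tensors are replaced by their shapes; tuples/lists/dicts keep their structure
print(shapes)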
torch_direct_api
|
v_1_10_0
|
import torch.fx as fx
import torch
from torch.fx.node import map_aggregate
def minifier(fail_f: fx.GraphModule, inps, module_fails):
"""
Minimizes an FX graph with the given inputs, such that the resulting FX graph still returns True for module_fails.
Uses 2 main strategies:
1. Truncates suffix: Removes some suffix from the graph and sets a new output.
2. Delta Debugging: Tries replacing half of the graph with inputs. If fails,
tries replacing quarter of the graph, etc.
>>> failing_function = fx.symbolic_trace(f)
>>> minimize(failing_function, [torch.randn(5)], lambda fx_g, inps: fx_g(*inps))
note: module_fails returns True if it fails.
"""
failing_graph = fail_f.graph
cur_size = len(failing_graph.nodes)
def graph_fails(graph, inps):
mod =
|
fx
|
fx.GraphModule(fail_f, graph)
|
mod.graph.lint()
return module_fails(mod, inps)
ConcreteProp(fail_f).propagate(*inps)
|
torch
|
torch.fx.GraphModule
|
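A hedged sketch of the GraphModule(root, graph) pattern the minifier relies on (the module M is illustrative): a fresh GraphModule can be rebuilt from an owning module plus an existing fx.Graph, then linted and executed:

import torch
from torch import fx

class M(torch.nn.Module):
    def forward(self, x):
        return x.sin() + x.cos()

traced = fx.symbolic_trace(M())
rebuilt = fx.GraphModule(traced, traced.graph)   # same root attributes, same graph
rebuilt.graph.lint()                             # sanity-check the graph
x = torch.randn(4)
assert torch.allclose(rebuilt(x), traced(x))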
torch_direct_api
|
v_1_10_0
|
import torch.fx as fx
import torch
from torch.fx.node import map_aggregate
def minifier(fail_f: fx.GraphModule, inps, module_fails):
"""
Minimizes an FX graph with the given inputs, such that the resulting FX graph still returns True for module_fails.
Uses 2 main strategies:
1. Truncates suffix: Removes some suffix from the graph and sets a new output.
2. Delta Debugging: Tries replacing half of the graph with inputs. If fails,
tries replacing quarter of the graph, etc.
>>> failing_function = fx.symbolic_trace(f)
>>> minimize(failing_function, [torch.randn(5)], lambda fx_g, inps: fx_g(*inps))
note: module_fails returns True if it fails.
"""
failing_graph = fail_f.graph
cur_size = len(failing_graph.nodes)
def graph_fails(graph, inps):
mod = fx.GraphModule(fail_f, graph)
mod.graph.lint()
return module_fails(mod, inps)
ConcreteProp(fail_f).propagate(*inps)
if not graph_fails(failing_graph, inps):
raise RuntimeError("Input graph did not fail the tester")
print(f"Started off with {cur_size} nodes")
def remove_suffix(cur_graph, cur_inps):
print("Strategy: Remove suffix")
assert graph_fails(cur_graph, cur_inps)
gap = 2**math.floor(math.log2(len(cur_graph.nodes)))
tested = set()
while gap >= 1:
new_graph =
|
fx
|
fx.Graph()
|
env = {}
for idx, node in enumerate(cur_graph.nodes):
new_node = new_graph.node_copy(node, lambda x: env[x])
if node.op not in ['placeholder', 'output']:
|
torch
|
torch.fx.Graph
|
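An illustrative version of the node-copy loop used by remove_suffix above (the function f and the names env/last are not from the source): nodes are copied into a fresh fx.Graph through an environment map, and a new output is chosen:

import torch
from torch import fx

def f(x):
    return x.relu().sum()

traced = fx.symbolic_trace(f)
new_graph = fx.Graph()
env = {}
last = None
for node in traced.graph.nodes:
    if node.op == "output":
        continue                                  # drop the old output node
    env[node] = new_graph.node_copy(node, lambda n: env[n])
    last = env[node]
new_graph.output(last)                            # last copied node becomes the output
truncated = fx.GraphModule(traced, new_graph)
x = torch.randn(3)
assert torch.allclose(truncated(x), f(x))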
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
from torch import Tensor
def extract_buffers(mod: nn.Module) -> Tuple[Tuple[Tensor, ...], List[str]]:
num_orig_params_with_duplicates = len(tuple(_named_buffers(mod, remove_duplicate=False)))
orig_params = tuple(mod.buffers())
if len(orig_params) != num_orig_params_with_duplicates:
raise_parameter_tying_error()
names = []
for name, p in list(mod.named_buffers()):
replacement =
|
torch
|
torch.empty_like(p, device='meta')
|
_set_nested_attr(mod, name.split("."), replacement)
names.append(name)
|
torch
|
torch.empty_like
|
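A tiny sketch of the swap performed above: empty_like(..., device='meta') produces a shape-and-dtype-only placeholder with no real storage (the tensor p is illustrative):

import torch

p = torch.randn(3, 4, dtype=torch.float32)
meta_p = torch.empty_like(p, device="meta")
assert meta_p.is_meta
assert meta_p.shape == p.shape and meta_p.dtype == p.dtype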
torch_direct_api
|
v_1_10_0
|
import torch
from torch import Tensor
from torch.utils._pytree import tree_flatten, tree_unflatten, _broadcast_to_and_flatten, TreeSpec, _register_pytree_node
def _process_batched_inputs(
in_dims: in_dims_t, args: Tuple, func: Callable
) -> Tuple[int, List[Any], List[Any], TreeSpec]:
if not isinstance(in_dims, int) and not isinstance(in_dims, tuple):
raise ValueError(
f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): '
f'expected `in_dims` to be int or a (potentially nested) tuple '
f'matching the structure of inputs, got: {type(in_dims)}.')
if len(args) == 0:
raise ValueError(
f'vmap({_get_name(func)})(<inputs>): got no inputs. Maybe you forgot to add '
f'inputs, or you are trying to vmap over a function with no inputs. '
f'The latter is unsupported.')
flat_args, args_spec = tree_flatten(args)
flat_in_dims =
|
_broadcast_to_and_flatten(in_dims, args_spec)
|
if flat_in_dims is None:
raise ValueError(
f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): '
f'in_dims is not compatible with the structure of `inputs`. '
|
torch
|
torch.utils._pytree._broadcast_to_and_flatten
|
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.fx as fx
import torch.utils._pytree as pytree
from torch.fx.passes import graph_drawer
def draw_graph(traced: torch.fx.GraphModule, fname: str, figname: str = "fx_graph", clear_meta=True):
if clear_meta:
new_graph = copy.deepcopy(traced.graph)
traced = fx.GraphModule(traced, new_graph)
for node in traced.graph.nodes:
node.meta = {}
base, ext = os.path.splitext(fname)
if not ext:
ext = ".svg"
print(f"Writing FX graph to file: {base}{ext}")
g =
|
graph
|
graph_drawer.FxGraphDrawer(traced, figname)
|
x = g.get_main_dot_graph()
getattr(x, "write_" + ext.lstrip("."))(f"{base}{ext}")
|
torch
|
torch.fx.passes.graph_drawer.FxGraphDrawer
|
torch_direct_api
|
v_1_10_0
|
import torch
from torch import fx
from torch._C import _te # type: ignore[attr-defined]
class PointwiseCompiler(object):
def __init__(
self,
name: str,
module_name: str,
pointwise_fn: Callable,
spec: List,
result: PointwiseOperatorCompileResult,
):
self.name = name
self.module_name = module_name
self.pointwise_fn = pointwise_fn
self.spec = spec
self.result = result
self.ndim = max(x.ndim for x in spec)
self.shapes = [["one"] * (self.ndim - x.ndim) + x.shape for x in spec]
self.strides = [["zero"] * (self.ndim - x.ndim) + x.stride for x in spec]
self.shape_flags = copy.deepcopy(self.shapes)
self.stride_flags = copy.deepcopy(self.strides)
self.shape_args = [
|
_te.VarHandle(torch.int32)
|
for _ in range(self.ndim)]
self.shape_vars = list(self.shape_args)
self.iter_vars = [_te.VarHandle(torch.int32) for _ in range(self.ndim)]
self.stride_args: List[_te.VarHandle] = []
self.strides_from: List[Tuple[int, int]] = []
|
torch
|
torch._C._te.VarHandle
|
|
torch_direct_api
|
v_1_10_0
|
import torch
from torch import fx
from torch._C import _te # type: ignore[attr-defined]
class PointwiseCompiler(object):
def __init__(
self,
name: str,
module_name: str,
pointwise_fn: Callable,
spec: List,
result: PointwiseOperatorCompileResult,
):
self.name = name
self.module_name = module_name
self.pointwise_fn = pointwise_fn
self.spec = spec
self.result = result
self.ndim = max(x.ndim for x in spec)
self.shapes = [["one"] * (self.ndim - x.ndim) + x.shape for x in spec]
self.strides = [["zero"] * (self.ndim - x.ndim) + x.stride for x in spec]
self.shape_flags = copy.deepcopy(self.shapes)
self.stride_flags = copy.deepcopy(self.strides)
self.shape_args = [_te.VarHandle(torch.int32) for _ in range(self.ndim)]
self.shape_vars = list(self.shape_args)
self.iter_vars = [_te.VarHandle(torch.int32) for _ in range(self.ndim)]
self.stride_args: List[_te.VarHandle] = []
self.strides_from: List[Tuple[int, int]] = []
self.broadcasts: List[Tuple[int, int]] = []
self.output_order: List[int] = []
(self.device,) = list(set(x.device.type for x in spec))
self.compile_mode = {"cpu": "llvm", "cuda": "cuda"}[self.device]
if spec[-1].out:
self.dtype = spec[-1].dtype
else:
self.dtype = functools.reduce(_combine_dtype, [x.dtype for x in spec])
self.run()
def add_stride_arg(self, a, d):
var = _te.VarHandle(torch.int32)
self.stride_args.append(var)
self.strides_from.append((a, d))
return var
def replace_shape(self, a, d, expected, replacement):
if self.shapes[a][d] == expected:
self.shapes[a][d] = replacement()
def replace_stride(self, a, d, expected, replacement):
if self.strides[a][d] == expected:
self.strides[a][d] = replacement()
def error_checks(self):
spec = self.spec
(layout,) = list(set(x.layout for x in spec))
assert layout == torch.strided, "TODO: support other layouts"
assert [x.out for x in spec[:-1]] == [False] * (len(spec) - 1)
assert all(
shape_type in _SHAPE_TYPES for shape_type in itertools.chain(*self.shapes)
)
assert all(
stride_type in _STRIDE_TYPES
for stride_type in itertools.chain(*self.strides)
)
def make_backwards(self, index: int):
"""
Compute the derivative of self.pointwise_fn with respect to input number index
"""
from sympy import symbols, diff # type: ignore[import]
vars = symbols([f"v{i}" for i in range(1 + _num_args(self.pointwise_fn))])
backwards_expr = (
diff(self.pointwise_fn(*vars[:-1]), vars[index]) * vars[-1]
) # chain rule
return _source_to_pointwise_operator(
f"lambda {','.join(map(str, vars))}: {backwards_expr}",
name=f"{self.name}.backwards{index}",
module_name=self.module_name,
)
def handle_autograd(self):
cnt = sum(int(x.requires_grad) for x in self.spec)
if cnt == 0:
return
assert all(
x.alias_group == 0 for x in self.spec
), "TODO: support aliased backwards"
for i, spec in enumerate(self.spec):
if spec.requires_grad:
assert spec.alias_group == 0, "TODO: support aliased backwards"
assert spec.out == 0, "TODO: support autograd on out= ?"
for d in range(self.ndim):
shape_types = {shape[d] for shape in self.shapes}
assert (
len(shape_types) == 1
), "TODO: support backwards for broadcasting"
self.result.set_backwards(i, self.make_backwards(i))
def compute_broadcasts_and_size_checks(self):
ndim = self.ndim
spec = self.spec
nargs = len(spec)
longest = _argmax([x.ndim for x in spec])
shapes = self.shapes
shape_from = [(longest, d) for d in range(ndim)]
for d in range(ndim):
first = None
for a in range(nargs):
if shapes[a][d] == "one":
self.broadcasts.append((a, d))
elif shapes[a][d] == "other":
if first is None:
shape_from[d] = first = (a, d - (ndim - spec[a].ndim))
else:
self.result.add_shape_check(
(first[0], first[1], a, d - (ndim - spec[a].ndim))
)
if all(shapes[a][d] == "one" for a in range(nargs)):
self.shape_vars[d] = _one()
self.result.set_shape_from(shape_from)
def compute_output_order(self):
"""
Decide on an iteration order (permutation) for the dimensions of the output
"""
ndim = self.ndim
strides = self.strides
output_order = []
output_order_remaining = [[i] for i in range(ndim)]
for d in reversed(range(ndim)):
if strides[0][d] == "one":
output_order.extend(output_order_remaining[d])
output_order_remaining[d].clear()
for d in reversed(range(ndim)):
if strides[0][d] == "transposed_contiguous":
output_order_remaining[d - 1].extend(output_order_remaining[d])
output_order_remaining[d].clear()
for d in reversed(range(ndim)):
output_order.extend(output_order_remaining[d])
output_order_remaining[d].clear()
assert not self.output_order
self.output_order = output_order
assert sorted(output_order) == list(range(ndim))
def compute_symbolic_shapes_and_strides(self):
nargs = len(self.spec)
ndim = self.ndim
shapes = self.shapes
strides = self.strides
for a in range(nargs):
for d in range(ndim):
self.replace_shape(a, d, "one", _one)
self.replace_shape(a, d, "other", lambda: self.shape_args[d])
self.replace_stride(a, d, "zero", _zero)
self.replace_stride(a, d, "one", _one)
if strides[a][d] == "as_arg":
strides[a][d] = self.add_stride_arg(a, d)
while any(isinstance(x, str) for x in strides[a]):
for d in reversed(range(ndim)):
self.replace_stride(
a, d, "contiguous", lambda: strides[a][d + 1] * shapes[a][d + 1]
)
if isinstance(strides[a][d], str):
break
for d in range(ndim):
self.replace_stride(
a,
d,
"transposed_contiguous",
lambda: strides[a][d - 1] * shapes[a][d - 1],
)
if isinstance(strides[a][d], str):
break
for a, d in self.broadcasts:
strides[a][d] = _zero()
self.result.set_stride_args_from(self.strides_from)
def indexing(self, stride):
result = _zero()
for c, s in zip(self.iter_vars, stride):
result = result + c * s
return result
def compute_code(self):
bufs = [
|
_te.BufHandle(s.dtype)
|
for s in self.spec]
if not self.spec[-1].out:
options_from = [
i for i in range(len(self.spec)) if self.spec[i].dtype == self.dtype
][0]
|
torch
|
torch._C._te.BufHandle
|
|
torch_direct_api
|
v_1_10_0
|
import torch
from torch import fx
from torch._C import _te # type: ignore[attr-defined]
class PointwiseCompiler(object):
def __init__(
self,
name: str,
module_name: str,
pointwise_fn: Callable,
spec: List,
result: PointwiseOperatorCompileResult,
):
self.name = name
self.module_name = module_name
self.pointwise_fn = pointwise_fn
self.spec = spec
self.result = result
self.ndim = max(x.ndim for x in spec)
self.shapes = [["one"] * (self.ndim - x.ndim) + x.shape for x in spec]
self.strides = [["zero"] * (self.ndim - x.ndim) + x.stride for x in spec]
self.shape_flags = copy.deepcopy(self.shapes)
self.stride_flags = copy.deepcopy(self.strides)
self.shape_args = [_te.VarHandle(torch.int32) for _ in range(self.ndim)]
self.shape_vars = list(self.shape_args)
self.iter_vars = [_te.VarHandle(torch.int32) for _ in range(self.ndim)]
self.stride_args: List[_te.VarHandle] = []
self.strides_from: List[Tuple[int, int]] = []
self.broadcasts: List[Tuple[int, int]] = []
self.output_order: List[int] = []
(self.device,) = list(set(x.device.type for x in spec))
self.compile_mode = {"cpu": "llvm", "cuda": "cuda"}[self.device]
if spec[-1].out:
self.dtype = spec[-1].dtype
else:
self.dtype = functools.reduce(_combine_dtype, [x.dtype for x in spec])
self.run()
def add_stride_arg(self, a, d):
var = _te.VarHandle(torch.int32)
self.stride_args.append(var)
self.strides_from.append((a, d))
return var
def replace_shape(self, a, d, expected, replacement):
if self.shapes[a][d] == expected:
self.shapes[a][d] = replacement()
def replace_stride(self, a, d, expected, replacement):
if self.strides[a][d] == expected:
self.strides[a][d] = replacement()
def error_checks(self):
spec = self.spec
(layout,) = list(set(x.layout for x in spec))
assert layout == torch.strided, "TODO: support other layouts"
assert [x.out for x in spec[:-1]] == [False] * (len(spec) - 1)
assert all(
shape_type in _SHAPE_TYPES for shape_type in itertools.chain(*self.shapes)
)
assert all(
stride_type in _STRIDE_TYPES
for stride_type in itertools.chain(*self.strides)
)
def make_backwards(self, index: int):
"""
Compute the derivative of self.pointwise_fn with respect to input number index
"""
from sympy import symbols, diff # type: ignore[import]
vars = symbols([f"v{i}" for i in range(1 + _num_args(self.pointwise_fn))])
backwards_expr = (
diff(self.pointwise_fn(*vars[:-1]), vars[index]) * vars[-1]
) # chain rule
return _source_to_pointwise_operator(
f"lambda {','.join(map(str, vars))}: {backwards_expr}",
name=f"{self.name}.backwards{index}",
module_name=self.module_name,
)
def handle_autograd(self):
cnt = sum(int(x.requires_grad) for x in self.spec)
if cnt == 0:
return
assert all(
x.alias_group == 0 for x in self.spec
), "TODO: support aliased backwards"
for i, spec in enumerate(self.spec):
if spec.requires_grad:
assert spec.alias_group == 0, "TODO: support aliased backwards"
assert spec.out == 0, "TODO: support autograd on out= ?"
for d in range(self.ndim):
shape_types = {shape[d] for shape in self.shapes}
assert (
len(shape_types) == 1
), "TODO: support backwards for broadcasting"
self.result.set_backwards(i, self.make_backwards(i))
def compute_broadcasts_and_size_checks(self):
ndim = self.ndim
spec = self.spec
nargs = len(spec)
longest = _argmax([x.ndim for x in spec])
shapes = self.shapes
shape_from = [(longest, d) for d in range(ndim)]
for d in range(ndim):
first = None
for a in range(nargs):
if shapes[a][d] == "one":
self.broadcasts.append((a, d))
elif shapes[a][d] == "other":
if first is None:
shape_from[d] = first = (a, d - (ndim - spec[a].ndim))
else:
self.result.add_shape_check(
(first[0], first[1], a, d - (ndim - spec[a].ndim))
)
if all(shapes[a][d] == "one" for a in range(nargs)):
self.shape_vars[d] = _one()
self.result.set_shape_from(shape_from)
def compute_output_order(self):
"""
Decide on an iteration order (permutation) for the dimensions of the output
"""
ndim = self.ndim
strides = self.strides
output_order = []
output_order_remaining = [[i] for i in range(ndim)]
for d in reversed(range(ndim)):
if strides[0][d] == "one":
output_order.extend(output_order_remaining[d])
output_order_remaining[d].clear()
for d in reversed(range(ndim)):
if strides[0][d] == "transposed_contiguous":
output_order_remaining[d - 1].extend(output_order_remaining[d])
output_order_remaining[d].clear()
for d in reversed(range(ndim)):
output_order.extend(output_order_remaining[d])
output_order_remaining[d].clear()
assert not self.output_order
self.output_order = output_order
assert sorted(output_order) == list(range(ndim))
def compute_symbolic_shapes_and_strides(self):
nargs = len(self.spec)
ndim = self.ndim
shapes = self.shapes
strides = self.strides
for a in range(nargs):
for d in range(ndim):
self.replace_shape(a, d, "one", _one)
self.replace_shape(a, d, "other", lambda: self.shape_args[d])
self.replace_stride(a, d, "zero", _zero)
self.replace_stride(a, d, "one", _one)
if strides[a][d] == "as_arg":
strides[a][d] = self.add_stride_arg(a, d)
while any(isinstance(x, str) for x in strides[a]):
for d in reversed(range(ndim)):
self.replace_stride(
a, d, "contiguous", lambda: strides[a][d + 1] * shapes[a][d + 1]
)
if isinstance(strides[a][d], str):
break
for d in range(ndim):
self.replace_stride(
a,
d,
"transposed_contiguous",
lambda: strides[a][d - 1] * shapes[a][d - 1],
)
if isinstance(strides[a][d], str):
break
for a, d in self.broadcasts:
strides[a][d] = _zero()
self.result.set_stride_args_from(self.strides_from)
def indexing(self, stride):
result = _zero()
for c, s in zip(self.iter_vars, stride):
result = result + c * s
return result
def compute_code(self):
bufs = [_te.BufHandle(s.dtype) for s in self.spec]
if not self.spec[-1].out:
options_from = [
i for i in range(len(self.spec)) if self.spec[i].dtype == self.dtype
][0]
self.result.add_allocated_output(options_from, self.output_order)
bufs.append(_te.BufHandle(self.dtype))
self.shapes.append(list(self.shape_vars))
output_strides = [None] * self.ndim
next_stride = _one()
for i in self.output_order:
output_strides[i] = next_stride
next_stride *= self.shape_vars[i]
assert all((x is not None) for x in output_strides)
self.strides.append(output_strides)
bufs_args = list(bufs)
aliases = {}
for i, s in enumerate(self.spec):
assert s.alias_group >= 0, "TODO: support complex aliasing"
if s.alias_group > 0 and s.alias_group not in aliases:
aliases[s.alias_group] = i
elif s.alias_group > 0 and FOLD_ALIASES:
bufs[i] = bufs[aliases[s.alias_group]]
input_bufs = bufs[:-1]
input_strides = self.strides[:-1]
output_bufs = bufs[-1:]
output_strides = self.strides[-1:]
inputs = [
_te.Cast.make(self.dtype, buf.load(self.indexing(stride)))
for buf, stride in zip(input_bufs, input_strides)
]
val = _fx_to_expr(self.pointwise_fn, self.dtype)(*inputs)
out = _te.Block(
[
buf.store(self.indexing(stride), val)
for buf, stride in zip(output_bufs, output_strides)
]
)
loops: List[_te.For] = []
for i in self.output_order:
var = self.iter_vars[i]
size = self.shape_vars[i]
out =
|
_te.For.make(var, _zero(), size, out)
|
loops.insert(0, out)
loopnest = _te.LoopNest(_te.Block([out]), output_bufs)
|
torch
|
torch._C._te.For.make
|
|
torch_direct_api
|
v_1_10_0
|
import torch
from torch import fx
from torch._C import _te # type: ignore[attr-defined]
class PointwiseCompiler(object):
def __init__(
self,
name: str,
module_name: str,
pointwise_fn: Callable,
spec: List,
result: PointwiseOperatorCompileResult,
):
self.name = name
self.module_name = module_name
self.pointwise_fn = pointwise_fn
self.spec = spec
self.result = result
self.ndim = max(x.ndim for x in spec)
self.shapes = [["one"] * (self.ndim - x.ndim) + x.shape for x in spec]
self.strides = [["zero"] * (self.ndim - x.ndim) + x.stride for x in spec]
self.shape_flags = copy.deepcopy(self.shapes)
self.stride_flags = copy.deepcopy(self.strides)
self.shape_args = [_te.VarHandle(torch.int32) for _ in range(self.ndim)]
self.shape_vars = list(self.shape_args)
self.iter_vars = [_te.VarHandle(torch.int32) for _ in range(self.ndim)]
self.stride_args: List[_te.VarHandle] = []
self.strides_from: List[Tuple[int, int]] = []
self.broadcasts: List[Tuple[int, int]] = []
self.output_order: List[int] = []
(self.device,) = list(set(x.device.type for x in spec))
self.compile_mode = {"cpu": "llvm", "cuda": "cuda"}[self.device]
if spec[-1].out:
self.dtype = spec[-1].dtype
else:
self.dtype = functools.reduce(_combine_dtype, [x.dtype for x in spec])
self.run()
def add_stride_arg(self, a, d):
var = _te.VarHandle(torch.int32)
self.stride_args.append(var)
self.strides_from.append((a, d))
return var
def replace_shape(self, a, d, expected, replacement):
if self.shapes[a][d] == expected:
self.shapes[a][d] = replacement()
def replace_stride(self, a, d, expected, replacement):
if self.strides[a][d] == expected:
self.strides[a][d] = replacement()
def error_checks(self):
spec = self.spec
(layout,) = list(set(x.layout for x in spec))
assert layout == torch.strided, "TODO: support other layouts"
assert [x.out for x in spec[:-1]] == [False] * (len(spec) - 1)
assert all(
shape_type in _SHAPE_TYPES for shape_type in itertools.chain(*self.shapes)
)
assert all(
stride_type in _STRIDE_TYPES
for stride_type in itertools.chain(*self.strides)
)
def make_backwards(self, index: int):
"""
Compute the derivative of self.pointwise_fn with respect to input number index
"""
from sympy import symbols, diff # type: ignore[import]
vars = symbols([f"v{i}" for i in range(1 + _num_args(self.pointwise_fn))])
backwards_expr = (
diff(self.pointwise_fn(*vars[:-1]), vars[index]) * vars[-1]
) # chain rule
return _source_to_pointwise_operator(
f"lambda {','.join(map(str, vars))}: {backwards_expr}",
name=f"{self.name}.backwards{index}",
module_name=self.module_name,
)
def handle_autograd(self):
cnt = sum(int(x.requires_grad) for x in self.spec)
if cnt == 0:
return
assert all(
x.alias_group == 0 for x in self.spec
), "TODO: support aliased backwards"
for i, spec in enumerate(self.spec):
if spec.requires_grad:
assert spec.alias_group == 0, "TODO: support aliased backwards"
assert spec.out == 0, "TODO: support autograd on out= ?"
for d in range(self.ndim):
shape_types = {shape[d] for shape in self.shapes}
assert (
len(shape_types) == 1
), "TODO: support backwards for broadcasting"
self.result.set_backwards(i, self.make_backwards(i))
def compute_broadcasts_and_size_checks(self):
ndim = self.ndim
spec = self.spec
nargs = len(spec)
longest = _argmax([x.ndim for x in spec])
shapes = self.shapes
shape_from = [(longest, d) for d in range(ndim)]
for d in range(ndim):
first = None
for a in range(nargs):
if shapes[a][d] == "one":
self.broadcasts.append((a, d))
elif shapes[a][d] == "other":
if first is None:
shape_from[d] = first = (a, d - (ndim - spec[a].ndim))
else:
self.result.add_shape_check(
(first[0], first[1], a, d - (ndim - spec[a].ndim))
)
if all(shapes[a][d] == "one" for a in range(nargs)):
self.shape_vars[d] = _one()
self.result.set_shape_from(shape_from)
def compute_output_order(self):
"""
Decide on an iteration order (permutation) for the dimensions of the output
"""
ndim = self.ndim
strides = self.strides
output_order = []
output_order_remaining = [[i] for i in range(ndim)]
for d in reversed(range(ndim)):
if strides[0][d] == "one":
output_order.extend(output_order_remaining[d])
output_order_remaining[d].clear()
for d in reversed(range(ndim)):
if strides[0][d] == "transposed_contiguous":
output_order_remaining[d - 1].extend(output_order_remaining[d])
output_order_remaining[d].clear()
for d in reversed(range(ndim)):
output_order.extend(output_order_remaining[d])
output_order_remaining[d].clear()
assert not self.output_order
self.output_order = output_order
assert sorted(output_order) == list(range(ndim))
def compute_symbolic_shapes_and_strides(self):
nargs = len(self.spec)
ndim = self.ndim
shapes = self.shapes
strides = self.strides
for a in range(nargs):
for d in range(ndim):
self.replace_shape(a, d, "one", _one)
self.replace_shape(a, d, "other", lambda: self.shape_args[d])
self.replace_stride(a, d, "zero", _zero)
self.replace_stride(a, d, "one", _one)
if strides[a][d] == "as_arg":
strides[a][d] = self.add_stride_arg(a, d)
while any(isinstance(x, str) for x in strides[a]):
for d in reversed(range(ndim)):
self.replace_stride(
a, d, "contiguous", lambda: strides[a][d + 1] * shapes[a][d + 1]
)
if isinstance(strides[a][d], str):
break
for d in range(ndim):
self.replace_stride(
a,
d,
"transposed_contiguous",
lambda: strides[a][d - 1] * shapes[a][d - 1],
)
if isinstance(strides[a][d], str):
break
for a, d in self.broadcasts:
strides[a][d] = _zero()
self.result.set_stride_args_from(self.strides_from)
def indexing(self, stride):
result = _zero()
for c, s in zip(self.iter_vars, stride):
result = result + c * s
return result
def compute_code(self):
bufs = [_te.BufHandle(s.dtype) for s in self.spec]
if not self.spec[-1].out:
options_from = [
i for i in range(len(self.spec)) if self.spec[i].dtype == self.dtype
][0]
self.result.add_allocated_output(options_from, self.output_order)
bufs.append(_te.BufHandle(self.dtype))
self.shapes.append(list(self.shape_vars))
output_strides = [None] * self.ndim
next_stride = _one()
for i in self.output_order:
output_strides[i] = next_stride
next_stride *= self.shape_vars[i]
assert all((x is not None) for x in output_strides)
self.strides.append(output_strides)
bufs_args = list(bufs)
aliases = {}
for i, s in enumerate(self.spec):
assert s.alias_group >= 0, "TODO: support complex aliasing"
if s.alias_group > 0 and s.alias_group not in aliases:
aliases[s.alias_group] = i
elif s.alias_group > 0 and FOLD_ALIASES:
bufs[i] = bufs[aliases[s.alias_group]]
input_bufs = bufs[:-1]
input_strides = self.strides[:-1]
output_bufs = bufs[-1:]
output_strides = self.strides[-1:]
inputs = [
_te.Cast.make(self.dtype, buf.load(self.indexing(stride)))
for buf, stride in zip(input_bufs, input_strides)
]
val = _fx_to_expr(self.pointwise_fn, self.dtype)(*inputs)
out = _te.Block(
[
buf.store(self.indexing(stride), val)
for buf, stride in zip(output_bufs, output_strides)
]
)
loops: List[_te.For] = []
for i in self.output_order:
var = self.iter_vars[i]
size = self.shape_vars[i]
out = _te.For.make(var, _zero(), size, out)
loops.insert(0, out)
loopnest =
|
_te.LoopNest(_te.Block([out]), output_bufs)
|
if self.device == "cuda" and loops:
flattened = loopnest.flatten(loops)
assert flattened
|
torch
|
torch._C._te.LoopNest
|
|
torch_direct_api
|
v_1_10_0
|
import torch
from torch import fx
from torch._C import _te # type: ignore[attr-defined]
class PointwiseCompiler(object):
def __init__(
self,
name: str,
module_name: str,
pointwise_fn: Callable,
spec: List,
result: PointwiseOperatorCompileResult,
):
self.name = name
self.module_name = module_name
self.pointwise_fn = pointwise_fn
self.spec = spec
self.result = result
self.ndim = max(x.ndim for x in spec)
self.shapes = [["one"] * (self.ndim - x.ndim) + x.shape for x in spec]
self.strides = [["zero"] * (self.ndim - x.ndim) + x.stride for x in spec]
self.shape_flags = copy.deepcopy(self.shapes)
self.stride_flags = copy.deepcopy(self.strides)
self.shape_args = [_te.VarHandle(torch.int32) for _ in range(self.ndim)]
self.shape_vars = list(self.shape_args)
self.iter_vars = [_te.VarHandle(torch.int32) for _ in range(self.ndim)]
self.stride_args: List[_te.VarHandle] = []
self.strides_from: List[Tuple[int, int]] = []
self.broadcasts: List[Tuple[int, int]] = []
self.output_order: List[int] = []
(self.device,) = list(set(x.device.type for x in spec))
self.compile_mode = {"cpu": "llvm", "cuda": "cuda"}[self.device]
if spec[-1].out:
self.dtype = spec[-1].dtype
else:
self.dtype = functools.reduce(_combine_dtype, [x.dtype for x in spec])
self.run()
def add_stride_arg(self, a, d):
var = _te.VarHandle(torch.int32)
self.stride_args.append(var)
self.strides_from.append((a, d))
return var
def replace_shape(self, a, d, expected, replacement):
if self.shapes[a][d] == expected:
self.shapes[a][d] = replacement()
def replace_stride(self, a, d, expected, replacement):
if self.strides[a][d] == expected:
self.strides[a][d] = replacement()
def error_checks(self):
spec = self.spec
(layout,) = list(set(x.layout for x in spec))
assert layout == torch.strided, "TODO: support other layouts"
assert [x.out for x in spec[:-1]] == [False] * (len(spec) - 1)
assert all(
shape_type in _SHAPE_TYPES for shape_type in itertools.chain(*self.shapes)
)
assert all(
stride_type in _STRIDE_TYPES
for stride_type in itertools.chain(*self.strides)
)
def make_backwards(self, index: int):
"""
Compute the derivative of self.pointwise_fn with respect to input number index
"""
from sympy import symbols, diff # type: ignore[import]
vars = symbols([f"v{i}" for i in range(1 + _num_args(self.pointwise_fn))])
backwards_expr = (
diff(self.pointwise_fn(*vars[:-1]), vars[index]) * vars[-1]
) # chain rule
return _source_to_pointwise_operator(
f"lambda {','.join(map(str, vars))}: {backwards_expr}",
name=f"{self.name}.backwards{index}",
module_name=self.module_name,
)
def handle_autograd(self):
cnt = sum(int(x.requires_grad) for x in self.spec)
if cnt == 0:
return
assert all(
x.alias_group == 0 for x in self.spec
), "TODO: support aliased backwards"
for i, spec in enumerate(self.spec):
if spec.requires_grad:
assert spec.alias_group == 0, "TODO: support aliased backwards"
assert spec.out == 0, "TODO: support autograd on out= ?"
for d in range(self.ndim):
shape_types = {shape[d] for shape in self.shapes}
assert (
len(shape_types) == 1
), "TODO: support backwards for broadcasting"
self.result.set_backwards(i, self.make_backwards(i))
def compute_broadcasts_and_size_checks(self):
ndim = self.ndim
spec = self.spec
nargs = len(spec)
longest = _argmax([x.ndim for x in spec])
shapes = self.shapes
shape_from = [(longest, d) for d in range(ndim)]
for d in range(ndim):
first = None
for a in range(nargs):
if shapes[a][d] == "one":
self.broadcasts.append((a, d))
elif shapes[a][d] == "other":
if first is None:
shape_from[d] = first = (a, d - (ndim - spec[a].ndim))
else:
self.result.add_shape_check(
(first[0], first[1], a, d - (ndim - spec[a].ndim))
)
if all(shapes[a][d] == "one" for a in range(nargs)):
self.shape_vars[d] = _one()
self.result.set_shape_from(shape_from)
def compute_output_order(self):
"""
Decide on an iteration order (permutation) for the dimensions of the output
"""
ndim = self.ndim
strides = self.strides
output_order = []
output_order_remaining = [[i] for i in range(ndim)]
for d in reversed(range(ndim)):
if strides[0][d] == "one":
output_order.extend(output_order_remaining[d])
output_order_remaining[d].clear()
for d in reversed(range(ndim)):
if strides[0][d] == "transposed_contiguous":
output_order_remaining[d - 1].extend(output_order_remaining[d])
output_order_remaining[d].clear()
for d in reversed(range(ndim)):
output_order.extend(output_order_remaining[d])
output_order_remaining[d].clear()
assert not self.output_order
self.output_order = output_order
assert sorted(output_order) == list(range(ndim))
def compute_symbolic_shapes_and_strides(self):
nargs = len(self.spec)
ndim = self.ndim
shapes = self.shapes
strides = self.strides
for a in range(nargs):
for d in range(ndim):
self.replace_shape(a, d, "one", _one)
self.replace_shape(a, d, "other", lambda: self.shape_args[d])
self.replace_stride(a, d, "zero", _zero)
self.replace_stride(a, d, "one", _one)
if strides[a][d] == "as_arg":
strides[a][d] = self.add_stride_arg(a, d)
while any(isinstance(x, str) for x in strides[a]):
for d in reversed(range(ndim)):
self.replace_stride(
a, d, "contiguous", lambda: strides[a][d + 1] * shapes[a][d + 1]
)
if isinstance(strides[a][d], str):
break
for d in range(ndim):
self.replace_stride(
a,
d,
"transposed_contiguous",
lambda: strides[a][d - 1] * shapes[a][d - 1],
)
if isinstance(strides[a][d], str):
break
for a, d in self.broadcasts:
strides[a][d] = _zero()
self.result.set_stride_args_from(self.strides_from)
def indexing(self, stride):
result = _zero()
for c, s in zip(self.iter_vars, stride):
result = result + c * s
return result
def compute_code(self):
bufs = [_te.BufHandle(s.dtype) for s in self.spec]
if not self.spec[-1].out:
options_from = [
i for i in range(len(self.spec)) if self.spec[i].dtype == self.dtype
][0]
self.result.add_allocated_output(options_from, self.output_order)
bufs.append(_te.BufHandle(self.dtype))
self.shapes.append(list(self.shape_vars))
output_strides = [None] * self.ndim
next_stride = _one()
for i in self.output_order:
output_strides[i] = next_stride
next_stride *= self.shape_vars[i]
assert all((x is not None) for x in output_strides)
self.strides.append(output_strides)
bufs_args = list(bufs)
aliases = {}
for i, s in enumerate(self.spec):
assert s.alias_group >= 0, "TODO: support complex aliasing"
if s.alias_group > 0 and s.alias_group not in aliases:
aliases[s.alias_group] = i
elif s.alias_group > 0 and FOLD_ALIASES:
bufs[i] = bufs[aliases[s.alias_group]]
input_bufs = bufs[:-1]
input_strides = self.strides[:-1]
output_bufs = bufs[-1:]
output_strides = self.strides[-1:]
inputs = [
_te.Cast.make(self.dtype, buf.load(self.indexing(stride)))
for buf, stride in zip(input_bufs, input_strides)
]
val = _fx_to_expr(self.pointwise_fn, self.dtype)(*inputs)
out = _te.Block(
[
buf.store(self.indexing(stride), val)
for buf, stride in zip(output_bufs, output_strides)
]
)
loops: List[_te.For] = []
for i in self.output_order:
var = self.iter_vars[i]
size = self.shape_vars[i]
out = _te.For.make(var, _zero(), size, out)
loops.insert(0, out)
loopnest = _te.LoopNest(_te.Block([out]), output_bufs)
if self.device == "cuda" and loops:
flattened = loopnest.flatten(loops)
assert flattened
inner =
|
_te.LoopNest.split_with_mask(flattened, 512)
|
assert inner
flattened.set_gpu_block_index(0)
inner.set_gpu_thread_index(0)
elif self.dtype == "llvm" and loops:
|
torch
|
torch._C._te.LoopNest.split_with_mask
|
|
torch_direct_api
|
v_1_10_0
|
import torch
from torch.utils._pytree import tree_flatten, tree_unflatten, tree_map
import torch.autograd.forward_ad as fwAD
def vjp(func: Callable, *primals, has_aux: bool = False):
"""
Standing for the vector-Jacobian product, returns a tuple containing the
results of :attr:`func` applied to :attr:`primals` and a function that, when
given ``cotangents``, computes the reverse-mode Jacobian of :attr:`func` with
respect to :attr:`primals` times ``cotangents``.
Args:
func (Callable): A Python function that takes one or more arguments. Must
return one or more Tensors.
primals (Tensors): Positional arguments to :attr:`func` that must all be
Tensors. The returned function will also be computing the
derivative with respect to these arguments
has_aux (bool): Flag indicating that :attr:`func` returns a
``(output, aux)`` tuple where the first element is the output of
the function to be differentiated and the second element is
other auxiliary objects that will not be differentiated.
Default: False.
Returns:
Returns a ``(output, vjp_fn)`` tuple containing the output of :attr:`func`
applied to :attr:`primals` and a function that computes the vjp of
:attr:`func` with respect to all :attr:`primals` using the cotangents passed
to the returned function. If ``has_aux is True``, then instead returns a
``(output, vjp_fn, aux)`` tuple.
The returned ``vjp_fn`` function will return a tuple of each VJP.
When used in simple cases, :func:`vjp` behaves the same as :func:`grad`
>>> x = torch.randn([5])
>>> f = lambda x: x.sin().sum()
>>> (_, vjpfunc) = functorch.vjp(f, x)
>>> grad = vjpfunc(torch.tensor(1.))[0]
>>> assert torch.allclose(grad, functorch.grad(f)(x))
However, :func:`vjp` can support functions with multiple outputs by
passing in the cotangents for each of the outputs
>>> x = torch.randn([5])
>>> f = lambda x: (x.sin(), x.cos())
>>> (_, vjpfunc) = functorch.vjp(f, x)
>>> vjps = vjpfunc((torch.ones([5]), torch.ones([5])))
>>> assert torch.allclose(vjps[0], x.cos() + -x.sin())
:func:`vjp` can even support outputs being Python structs
>>> x = torch.randn([5])
>>> f = lambda x: {'first': x.sin(), 'second': x.cos()}
>>> (_, vjpfunc) = functorch.vjp(f, x)
>>> cotangents = {'first': torch.ones([5]), 'second': torch.ones([5])}
>>> vjps = vjpfunc(cotangents)
>>> assert torch.allclose(vjps[0], x.cos() + -x.sin())
The function returned by :func:`vjp` will compute the partials with
respect to each of the :attr:`primals`
>>> x, y = torch.randn([5, 4]), torch.randn([4, 5])
>>> (_, vjpfunc) = functorch.vjp(torch.matmul, x, y)
>>> cotangents = torch.randn([5, 5])
>>> vjps = vjpfunc(cotangents)
>>> assert len(vjps) == 2
>>> assert torch.allclose(vjps[0], torch.matmul(cotangents, y.transpose(0, 1)))
>>> assert torch.allclose(vjps[1], torch.matmul(x.transpose(0, 1), cotangents))
:attr:`primals` are the positional arguments for :attr:`f`. All kwargs use their
default value
>>> x = torch.randn([5])
>>> def f(x, scale=4.):
>>> return x * 4.
>>>
>>> (_, vjpfunc) = functorch.vjp(f, x)
>>> vjps = vjpfunc(torch.ones_like(x))
>>> assert torch.allclose(vjps[0], torch.full(x.shape, 4.))
.. note::
Using PyTorch ``torch.no_grad`` together with ``vjp``.
Case 1: Using ``torch.no_grad`` inside a function:
>>> def f(x):
>>> with torch.no_grad():
>>> c = x ** 2
>>> return x - c
In this case, ``vjp(f)(x)`` will respect the inner ``torch.no_grad``.
Case 2: Using ``vjp`` inside ``torch.no_grad`` context manager:
>>> with torch.no_grad():
>>> vjp(f)(x)
In this case, ``vjp`` will respect the inner ``torch.no_grad``, but not the
outer one. This is because ``vjp`` is a "function transform": its result
should not depend on the result of a context manager outside of ``f``.
"""
level = _grad_increment_nesting()
try:
with torch.enable_grad():
primals = _wrap_all_tensors(primals, level)
diff_primals = _create_differentiable(primals, level)
primals_out = func(*diff_primals)
if has_aux:
if not (isinstance(primals_out, tuple) and len(primals_out) == 2):
raise RuntimeError(
"vjp(f, *primals): output of function f should be a tuple: (output, aux) "
"if has_aux is True"
)
primals_out, aux = primals_out
aux = _undo_create_differentiable(aux, level)
flat_primals_out, primals_out_spec = tree_flatten(primals_out)
assert_non_empty_tensor_output(flat_primals_out, 'vjp(f, *primals)')
flat_diff_primals, primals_spec = tree_flatten(diff_primals)
results = _undo_create_differentiable(primals_out, level)
for primal_out in flat_primals_out:
assert isinstance(primal_out, torch.Tensor)
if primal_out.is_floating_point() or primal_out.is_complex():
continue
raise RuntimeError("vjp(f, ...): All outputs of f must be "
"floating-point or complex Tensors, got Tensor "
f"with dtype {primal_out.dtype}")
def wrapper(cotangents, retain_graph=True, create_graph=None):
if create_graph is None:
create_graph =
|
torch
|
torch.is_grad_enabled()
|
flat_cotangents, cotangents_spec = tree_flatten(cotangents)
if primals_out_spec != cotangents_spec:
raise RuntimeError(
f'Expected pytree structure of cotangents to be the same '
|
torch
|
torch.is_grad_enabled
|
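The row above completes ``create_graph = torch.is_grad_enabled()``, i.e. the wrapper defaults ``create_graph`` to the ambient autograd mode. A minimal, self-contained sketch of how that query behaves (plain PyTorch, not part of the dataset row):

import torch

# torch.is_grad_enabled() reports whether grad mode is currently on, so a
# default of torch.is_grad_enabled() simply follows the caller's context.
assert torch.is_grad_enabled()              # on by default
with torch.no_grad():
    assert not torch.is_grad_enabled()      # off inside no_grad
    with torch.enable_grad():
        assert torch.is_grad_enabled()      # back on inside enable_grad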
torch_direct_api
|
v_1_10_0
|
import torch
from torch.utils._pytree import tree_flatten, tree_unflatten, tree_map
import torch.autograd.forward_ad as fwAD
def safe_unpack_dual(dual, strict):
if not isinstance(dual, torch.Tensor):
raise RuntimeError(
f'{jvp_str}: expected f(*args) to return only tensors'
f', got unsupported type {type(dual)}'
)
primal, tangent =
|
fwAD
|
fwAD.unpack_dual(dual)
|
if tangent is None:
if strict:
raise RuntimeError(
'jvp(f, primals, tangents, strict=True): '
|
torch
|
torch.autograd.forward_ad.unpack_dual
|
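For the ``fwAD.unpack_dual`` target above, the following is a hedged sketch of forward-mode AD using the public ``torch.autograd.forward_ad`` API; the tensors and the ``* 3.0`` function are illustrative choices, not taken from the row:

import torch
import torch.autograd.forward_ad as fwAD

primal = torch.randn(3)
tangent = torch.ones(3)
with fwAD.dual_level():
    # A dual tensor packages a primal value together with its tangent.
    dual = fwAD.make_dual(primal, tangent)
    out = dual * 3.0
    # unpack_dual returns (primal, tangent); the output tangent is the JVP of
    # the computation applied to the input tangent.
    p, t = fwAD.unpack_dual(out)
assert torch.allclose(p, primal * 3.0)
assert torch.allclose(t, tangent * 3.0)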
torch_direct_api
|
v_1_10_0
|
import torch
from torch._C import _disabled_torch_function_impl
import torch.utils._pytree as pytree
from torch.fx import Tracer, GraphModule
import torch.fx as fx
from torch.fx.passes.shape_prop import _extract_tensor_metadata
class PythonTensor(torch.Tensor):
elem: torch.Tensor
__slots__ = ['elem', 'proxy']
@staticmethod
def __new__(cls, elem, proxy, device=None):
r = torch.Tensor._make_wrapper_subclass(
cls, elem.size(),
strides=elem.stride(), storage_offset=elem.storage_offset(),
dtype=elem.dtype, layout=elem.layout, requires_grad=elem.requires_grad,
device=(elem.device if device is None else device),
)
if USE_META:
r.elem = elem.to('meta')
else:
r.elem = elem
r.proxy = proxy
proxy.node.meta['tensor_meta'] =
|
_extract_tensor_metadata(r)
|
return r
def __repr__(self):
return f"PythonTensor({self.elem})"
|
torch
|
torch.fx.passes.shape_prop._extract_tensor_metadata
|
|
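``_extract_tensor_metadata`` is a private ``torch.fx`` helper, so rather than call it directly, here is a sketch of the kind of information shape propagation stores on ``node.meta['tensor_meta']``, built from public tensor attributes only (the dict keys are illustrative, not the exact ``TensorMetadata`` fields):

import torch

t = torch.randn(2, 3)
# Enough static information to reason about the value without executing it.
meta = {
    "shape": tuple(t.shape),
    "dtype": t.dtype,
    "stride": t.stride(),
    "storage_offset": t.storage_offset(),
    "layout": t.layout,
    "requires_grad": t.requires_grad,
}
print(meta)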
torch_direct_api
|
v_1_10_0
|
import torch
from torch._C import _disabled_torch_function_impl
import torch.utils._pytree as pytree
from torch.fx import Tracer, GraphModule
import torch.fx as fx
from torch.fx.passes.shape_prop import _extract_tensor_metadata
class PythonTensor(torch.Tensor):
elem: torch.Tensor
__slots__ = ['elem', 'proxy']
@staticmethod
def __new__(cls, elem, proxy, device=None):
r = torch.Tensor._make_wrapper_subclass(
cls, elem.size(),
strides=elem.stride(), storage_offset=elem.storage_offset(),
dtype=elem.dtype, layout=elem.layout, requires_grad=elem.requires_grad,
device=(elem.device if device is None else device),
)
if USE_META:
r.elem = elem.to('meta')
else:
r.elem = elem
r.proxy = proxy
proxy.node.meta['tensor_meta'] = _extract_tensor_metadata(r)
return r
def __repr__(self):
return f"PythonTensor({self.elem})"
__torch_function__ = _disabled_torch_function_impl
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
if func in CURRENT_DECOMPOSITION_TABLE:
return CURRENT_DECOMPOSITION_TABLE[func](*args, **kwargs)
def unwrap_proxy(e):
return e.proxy if isinstance(e, PythonTensor) else e
def unwrap_tensor(e):
return e.elem if isinstance(e, PythonTensor) else e
input_devices = [i.device for i in pytree.tree_flatten(args)[0] +
pytree.tree_flatten(kwargs)[0] if isinstance(i, torch.Tensor)]
output_device = get_output_device(input_devices, func)
proxy_args = pytree.tree_map(unwrap_proxy, args)
proxy_kwargs = pytree.tree_map(unwrap_proxy, kwargs)
proxy_out = func(*proxy_args, **proxy_kwargs)
if func.__name__[-1] == "_" and func.__name__[0] != "_":
args[0].proxy = proxy_out
args = pytree.tree_map(unwrap_tensor, args)
kwargs = pytree.tree_map(unwrap_tensor, kwargs)
try:
real_out = func(*args, **kwargs)
except NotImplementedError:
args = pytree.tree_map(lambda x: torch.ones_like(x, device=output_device)
if isinstance(x, torch.Tensor) else x, args)
kwargs = pytree.tree_map(lambda x: torch.ones_like(x, device=output_device)
if isinstance(x, torch.Tensor) else x, kwargs)
real_out = func(*args, **kwargs)
def wrap_with_proxy(e, proxy):
if e is None:
e =
|
torch
|
torch.empty(())
|
if type(e) == torch.Tensor:
return PythonTensor(e, proxy, output_device)
else:
return e
|
torch
|
torch.empty
|
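The ``torch.empty(())`` target above supplies a placeholder tensor when an output element is ``None`` before it is wrapped as a ``PythonTensor``. A quick sketch of what that call produces:

import torch

e = torch.empty(())          # 0-dimensional (scalar-shaped), uninitialized
print(e.shape, e.dim())      # torch.Size([]) 0
# Not to be confused with torch.empty(0), a 1-D tensor with zero elements.
print(torch.empty(0).shape)  # torch.Size([0])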
torch_direct_api
|
v_1_10_0
|
import torch
import torch.fx as fx
import torch.nn as nn
def ts_compile(fx_g: fx.GraphModule, _) -> Callable:
"""
Compiles the :attr:`fx_g` with Torchscript compiler.
.. warning::
This API is experimental and likely to change.
Args:
fx_g(fx.GraphModule): The input Fx graph module to be compiled.
Returns:
Torch scripted model.
"""
for node in fx_g.graph.nodes:
if node.target in (torch.ops.aten.new_zeros, torch.ops.aten.new_empty):
if node.args[1] == []:
args = list(node.args)
args[1] = [1]
node.args = tuple(args)
elif node.target == torch.ops.aten.avg_pool2d_backward:
if node.args[3] == []:
args = list(node.args)
args[3] = [1, 1]
node.args = tuple(args)
for node in fx_g.graph.nodes:
new_kwargs = {}
for k, v in node.kwargs.items():
if isinstance(v, torch.device):
v = v.type
new_kwargs[k] = v
node.kwargs = new_kwargs
fx_g.graph.lint()
for i in range(1000):
attr = f"_tensor_constant{i}"
if hasattr(fx_g, attr):
setattr(fx_g, attr, getattr(fx_g, attr).cuda())
else:
break
fx_g.recompile()
f =
|
torch
|
torch.jit.script(fx_g)
|
torch._C._jit_pass_remove_mutation(f.graph)
f = torch.jit.freeze(f.eval())
|
torch
|
torch.jit.script
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.fx as fx
import torch.nn as nn
def ts_compile(fx_g: fx.GraphModule, _) -> Callable:
"""
Compiles the :attr:`fx_g` with Torchscript compiler.
.. warning::
This API is experimental and likely to change.
Args:
fx_g(fx.GraphModule): The input Fx graph module to be compiled.
Returns:
Torch scripted model.
"""
for node in fx_g.graph.nodes:
if node.target in (torch.ops.aten.new_zeros, torch.ops.aten.new_empty):
if node.args[1] == []:
args = list(node.args)
args[1] = [1]
node.args = tuple(args)
elif node.target == torch.ops.aten.avg_pool2d_backward:
if node.args[3] == []:
args = list(node.args)
args[3] = [1, 1]
node.args = tuple(args)
for node in fx_g.graph.nodes:
new_kwargs = {}
for k, v in node.kwargs.items():
if isinstance(v, torch.device):
v = v.type
new_kwargs[k] = v
node.kwargs = new_kwargs
fx_g.graph.lint()
for i in range(1000):
attr = f"_tensor_constant{i}"
if hasattr(fx_g, attr):
setattr(fx_g, attr, getattr(fx_g, attr).cuda())
else:
break
fx_g.recompile()
f = torch.jit.script(fx_g)
torch._C._jit_pass_remove_mutation(f.graph)
f =
|
torch
|
torch.jit.freeze(f.eval())
|
f = torch.jit.optimize_for_inference(f)
return f
|
torch
|
torch.jit.freeze
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.fx as fx
import torch.nn as nn
def ts_compile(fx_g: fx.GraphModule, _) -> Callable:
"""
Compiles the :attr:`fx_g` with Torchscript compiler.
.. warning::
This API is experimental and likely to change.
Args:
fx_g(fx.GraphModule): The input Fx graph module to be compiled.
Returns:
Torch scripted model.
"""
for node in fx_g.graph.nodes:
if node.target in (torch.ops.aten.new_zeros, torch.ops.aten.new_empty):
if node.args[1] == []:
args = list(node.args)
args[1] = [1]
node.args = tuple(args)
elif node.target == torch.ops.aten.avg_pool2d_backward:
if node.args[3] == []:
args = list(node.args)
args[3] = [1, 1]
node.args = tuple(args)
for node in fx_g.graph.nodes:
new_kwargs = {}
for k, v in node.kwargs.items():
if isinstance(v, torch.device):
v = v.type
new_kwargs[k] = v
node.kwargs = new_kwargs
fx_g.graph.lint()
for i in range(1000):
attr = f"_tensor_constant{i}"
if hasattr(fx_g, attr):
setattr(fx_g, attr, getattr(fx_g, attr).cuda())
else:
break
fx_g.recompile()
f = torch.jit.script(fx_g)
torch._C._jit_pass_remove_mutation(f.graph)
f = torch.jit.freeze(f.eval())
f =
|
torch
|
torch.jit.optimize_for_inference(f)
|
return f
def tensorexpr_compile(fx_module: fx.GraphModule, flat_args) -> Callable:
|
torch
|
torch.jit.optimize_for_inference
|
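The three ``ts_compile`` rows above complete the same TorchScript lowering sequence: script the FX graph module, freeze it in eval mode, then apply inference-only optimizations. A self-contained sketch of that pipeline on an ordinary ``nn.Module`` (the toy model, shapes, and tolerance are assumptions for illustration):

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))

scripted = torch.jit.script(model)                     # compile to TorchScript
frozen = torch.jit.freeze(scripted.eval())             # inline weights and attributes
optimized = torch.jit.optimize_for_inference(frozen)   # apply inference-only fusions

x = torch.randn(2, 8)
assert torch.allclose(optimized(x), model(x), atol=1e-5)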