| group (stringclasses, 5 values) | version (stringclasses, 1 value) | prompt (stringlengths, 48–35.8k) | code_str (stringclasses, 213 values) | target (stringlengths, 4–395) | right_context_few_lines (stringlengths, 1–358) | library (stringclasses, 1 value) | api (stringlengths, 6–61, ⌀) |
|---|---|---|---|---|---|---|---|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn.functional as F
from torch.distributed import all_reduce, get_rank, get_world_size, init_process_group
def compute_world_size() -> int:
rank = int(os.getenv("RANK")) # pyre-ignore[6]
world_size = int(os.getenv("WORLD_SIZE")) # pyre-ignore[6]
master_port = int(os.getenv("MASTER_PORT")) # pyre-ignore[6]
master_addr = os.getenv("MASTER_ADDR")
backend = "gloo"
print(f"initializing `{backend}` process group")
init_process_group(
backend=backend,
init_method=f"tcp://{master_addr}:{master_port}",
rank=rank,
world_size=world_size,
)
print("successfully initialized process group")
rank =
|
get
|
get_rank()
|
world_size = get_world_size()
t = F.one_hot(torch.tensor(rank), num_classes=world_size)
all_reduce(t)
|
torch
|
torch.distributed.get_rank
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn.functional as F
from torch.distributed import all_reduce, get_rank, get_world_size, init_process_group
def compute_world_size() -> int:
rank = int(os.getenv("RANK")) # pyre-ignore[6]
world_size = int(os.getenv("WORLD_SIZE")) # pyre-ignore[6]
master_port = int(os.getenv("MASTER_PORT")) # pyre-ignore[6]
master_addr = os.getenv("MASTER_ADDR")
backend = "gloo"
print(f"initializing `{backend}` process group")
init_process_group(
backend=backend,
init_method=f"tcp://{master_addr}:{master_port}",
rank=rank,
world_size=world_size,
)
print("successfully initialized process group")
rank = get_rank()
world_size =
|
get
|
get_world_size()
|
t = F.one_hot(torch.tensor(rank), num_classes=world_size)
all_reduce(t)
computed_world_size = int(torch.sum(t).item())
|
torch
|
torch.distributed.get_world_size
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn.functional as F
from torch.distributed import all_reduce, get_rank, get_world_size, init_process_group
def compute_world_size() -> int:
rank = int(os.getenv("RANK")) # pyre-ignore[6]
world_size = int(os.getenv("WORLD_SIZE")) # pyre-ignore[6]
master_port = int(os.getenv("MASTER_PORT")) # pyre-ignore[6]
master_addr = os.getenv("MASTER_ADDR")
backend = "gloo"
print(f"initializing `{backend}` process group")
init_process_group(
backend=backend,
init_method=f"tcp://{master_addr}:{master_port}",
rank=rank,
world_size=world_size,
)
print("successfully initialized process group")
rank = get_rank()
world_size = get_world_size()
t =
|
F
|
F.one_hot(torch.tensor(rank), num_classes=world_size)
|
all_reduce(t)
computed_world_size = int(torch.sum(t).item())
print(
f"rank: {rank}, actual world_size: {world_size}, computed world_size: {computed_world_size}"
|
torch
|
torch.nn.functional.one_hot
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.distributed as dist
from torch.distributed.distributed_c10d import _get_default_group
def local_device() -> torch.device:
"""
Returns the device that the current process should be using for models and tensors
based on the default process group.
.. note:: If the process group has not been initialized
then this method returns ``cuda`` if GPU is available on the machine, and ``cpu`` otherwise.
Returns ``cuda:$LOCAL_RANK`` if the default process group's backend is ``nccl`` otherwise ``cpu``
"""
if dist.is_initialized():
default_pg =
|
_get_default_group()
|
return (
local_cuda_device()
if default_pg.options.backend == "nccl"
else torch.device("cpu")
|
torch
|
torch.distributed.distributed_c10d._get_default_group
|
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.jit
from torch.nn import functional as F
class TinyImageNetModel(pl.LightningModule):
"""
A very simple linear model for the tiny image net dataset.
"""
def __init__(
self, layer_sizes: Optional[List[int]] = None, lr: Optional[float] = None
) -> None:
super().__init__()
if not layer_sizes:
layer_sizes = [1, 1, 1, 1]
self.lr: float = lr or 0.001
m = ResNet(BasicBlock, layer_sizes)
m.avgpool =
|
torch
|
torch.nn.AdaptiveAvgPool2d(1)
|
m.fc.out_features = 200
self.model: ResNet = m
self.train_acc = Accuracy()
|
torch
|
torch.nn.AdaptiveAvgPool2d
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.jit
from torch.nn import functional as F
def export_inference_model(
model: TinyImageNetModel, out_path: str, tmpdir: str
) -> None:
"""
export_inference_model uses TorchScript JIT to serialize the
TinyImageNetModel into a standalone file that can be used during inference.
TorchServe can also handle interpreted models with just the model.py file if
your model can't be JITed.
"""
print("exporting inference model")
jit_path = os.path.join(tmpdir, "model_jit.pt")
jitted =
|
torch
|
torch.jit.script(model)
|
print(f"saving JIT model to {jit_path}")
torch.jit.save(jitted, jit_path)
model_name = "tiny_image_net"
|
torch
|
torch.jit.script
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 =
|
nn
|
nn.Conv2d(1, 32, 3, 1)
|
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
|
torch
|
torch.nn.Conv2d
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 =
|
nn
|
nn.Dropout(0.25)
|
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
|
torch
|
torch.nn.Dropout
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 =
|
nn
|
nn.Linear(9216, 128)
|
self.fc2 = nn.Linear(128, 10)
def forward(self, x: Tensor) -> Tensor:
x = self.conv1(x)
|
torch
|
torch.nn.Linear
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x =
|
F
|
F.relu(x)
|
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
|
torch
|
torch.nn.functional.relu
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x =
|
F
|
F.max_pool2d(x, 2)
|
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
|
torch
|
torch.nn.functional.max_pool2d
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x =
|
torch
|
torch.flatten(x, 1)
|
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
|
torch
|
torch.flatten
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output =
|
F
|
F.log_softmax(x, dim=1)
|
return output
def train(
|
torch
|
torch.nn.functional.log_softmax
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
def train(
args: Namespace,
model: nn.Module,
device: torch.device,
train_loader: torch.utils.data.DataLoader[VisionDataset],
optimizer: optim.Optimizer,
epoch: int,
writer: Optional[SummaryWriter],
) -> None:
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss =
|
F
|
F.nll_loss(output, target)
|
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print(
|
torch
|
torch.nn.functional.nll_loss
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
def main() -> None:
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=1000,
metavar="N",
help="input batch size for testing (default: 1000)",
)
parser.add_argument(
"--epochs",
type=int,
default=14,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"--lr",
type=float,
default=1.0,
metavar="LR",
help="learning rate (default: 1.0)",
)
parser.add_argument(
"--gamma",
type=float,
default=0.7,
metavar="M",
help="Learning rate step gamma (default: 0.7)",
)
parser.add_argument(
"--no-cuda", action="store_true", default=False, help="disables CUDA training"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--save-model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--tb-log-path",
type=str,
default=None,
help="Tensorboard log path",
)
parser.add_argument(
"--data-path",
type=str,
default="../data",
help="Model data storge path",
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device =
|
torch
|
torch.device("cuda" if use_cuda else "cpu")
|
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.test_batch_size}
if use_cuda:
|
torch
|
torch.device
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
def main() -> None:
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=1000,
metavar="N",
help="input batch size for testing (default: 1000)",
)
parser.add_argument(
"--epochs",
type=int,
default=14,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"--lr",
type=float,
default=1.0,
metavar="LR",
help="learning rate (default: 1.0)",
)
parser.add_argument(
"--gamma",
type=float,
default=0.7,
metavar="M",
help="Learning rate step gamma (default: 0.7)",
)
parser.add_argument(
"--no-cuda", action="store_true", default=False, help="disables CUDA training"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--save-model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--tb-log-path",
type=str,
default=None,
help="Tensorboard log path",
)
parser.add_argument(
"--data-path",
type=str,
default="../data",
help="Model data storge path",
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.test_batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
data_path = args.data_path
if not os.path.exists(data_path):
os.makedirs(data_path)
dataset1 = datasets.MNIST(data_path, train=True, download=True, transform=transform)
dataset2 = datasets.MNIST(data_path, train=False, transform=transform)
train_loader =
|
torch
|
torch.utils.data.DataLoader(dataset1, **train_kwargs)
|
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
|
torch
|
torch.utils.data.DataLoader
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
def main() -> None:
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=1000,
metavar="N",
help="input batch size for testing (default: 1000)",
)
parser.add_argument(
"--epochs",
type=int,
default=14,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"--lr",
type=float,
default=1.0,
metavar="LR",
help="learning rate (default: 1.0)",
)
parser.add_argument(
"--gamma",
type=float,
default=0.7,
metavar="M",
help="Learning rate step gamma (default: 0.7)",
)
parser.add_argument(
"--no-cuda", action="store_true", default=False, help="disables CUDA training"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--save-model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--tb-log-path",
type=str,
default=None,
help="Tensorboard log path",
)
parser.add_argument(
"--data-path",
type=str,
default="../data",
help="Model data storge path",
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.test_batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
data_path = args.data_path
if not os.path.exists(data_path):
os.makedirs(data_path)
dataset1 = datasets.MNIST(data_path, train=True, download=True, transform=transform)
dataset2 = datasets.MNIST(data_path, train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer =
|
optim
|
optim.Adadelta(model.parameters(), lr=args.lr)
|
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
|
torch
|
torch.optim.Adadelta
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
def main() -> None:
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=1000,
metavar="N",
help="input batch size for testing (default: 1000)",
)
parser.add_argument(
"--epochs",
type=int,
default=14,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"--lr",
type=float,
default=1.0,
metavar="LR",
help="learning rate (default: 1.0)",
)
parser.add_argument(
"--gamma",
type=float,
default=0.7,
metavar="M",
help="Learning rate step gamma (default: 0.7)",
)
parser.add_argument(
"--no-cuda", action="store_true", default=False, help="disables CUDA training"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--save-model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--tb-log-path",
type=str,
default=None,
help="Tensorboard log path",
)
parser.add_argument(
"--data-path",
type=str,
default="../data",
help="Model data storge path",
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.test_batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
data_path = args.data_path
if not os.path.exists(data_path):
os.makedirs(data_path)
dataset1 = datasets.MNIST(data_path, train=True, download=True, transform=transform)
dataset2 = datasets.MNIST(data_path, train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler =
|
StepLR
|
StepLR(optimizer, step_size=1, gamma=args.gamma)
|
app_run = tracker.app_run_from_env()
|
torch
|
torch.optim.lr_scheduler.StepLR
|
torch_direct_api
|
v_1_10_0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
def main() -> None:
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=1000,
metavar="N",
help="input batch size for testing (default: 1000)",
)
parser.add_argument(
"--epochs",
type=int,
default=14,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"--lr",
type=float,
default=1.0,
metavar="LR",
help="learning rate (default: 1.0)",
)
parser.add_argument(
"--gamma",
type=float,
default=0.7,
metavar="M",
help="Learning rate step gamma (default: 0.7)",
)
parser.add_argument(
"--no-cuda", action="store_true", default=False, help="disables CUDA training"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--save-model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--tb-log-path",
type=str,
default=None,
help="Tensorboard log path",
)
parser.add_argument(
"--data-path",
type=str,
default="../data",
help="Model data storge path",
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.test_batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
data_path = args.data_path
if not os.path.exists(data_path):
os.makedirs(data_path)
dataset1 = datasets.MNIST(data_path, train=True, download=True, transform=transform)
dataset2 = datasets.MNIST(data_path, train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
app_run = tracker.app_run_from_env()
app_run.add_metadata(**train_kwargs)
app_run.add_metadata(lr=args.lr, gamma=args.gamma)
app_run.add_metadata(data_path=data_path)
writer = None
if args.tb_log_path:
writer =
|
SummaryWriter
|
SummaryWriter(log_dir=args.tb_log_path)
|
app_run.add_artifact("tensorboard", args.tb_log_path)
for epoch in range(1, args.epochs + 1):
|
torch
|
torch.utils.tensorboard.SummaryWriter
|
torch_direct_api
|
v_1_10_0
|
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
class TestMinifier(TestCase):
def test_has_mul_minifier(self):
def failing_f(x, y):
y = y / 3
x = x + 3
x = x * y
return x + y
inps = [
|
torch
|
torch.randn(3)
|
, torch.randn(3)]
failing_f = make_fx(failing_f)(*inps)
def pass_checker(fx_g, inps):
return (torch.ops.aten.mul in set([i.target for i in fx_g.graph.nodes]))
|
torch
|
torch.randn
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn as nn
import torch.utils._pytree as pytree
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
class TestAOTAutograd(TestCase):
def verify_aot_autograd(self, f, inp):
if isinstance(f, nn.Module):
compiled_f = aot_module(f, nop)
else:
compiled_f = aot_function(f, nop)
ref_out, ref_grad = _outs_and_grads(f, inp)
test_out, test_grad = _outs_and_grads(compiled_f, inp)
self.assertEqual(ref_out, test_out)
self.assertEqual(ref_grad, test_grad)
def test_single_output(self):
def f(a, b):
return a + b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output(self):
def f(a, b):
return a + b, a - b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output_list(self):
def f(a, b):
return [a + b, a - b]
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_no_grad_input_output(self):
def f(a, b):
return a.cos(), b.cos(), a * b
inp_thunks = [lambda: torch.randn(5, requires_grad=True), lambda: torch.randn(5, requires_grad=False)]
for inps in itertools.product(inp_thunks, repeat=2):
inps = [i() for i in inps]
self.verify_aot_autograd(f, inps)
def test_inner_grad(self):
def foo(x):
y =
|
torch
|
torch.exp(x)
|
z = torch.autograd.grad(y, x)
return z
inps = [torch.randn((), requires_grad=True)]
self.verify_aot_autograd(foo, inps)
|
torch
|
torch.exp
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn as nn
import torch.utils._pytree as pytree
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
class TestAOTAutograd(TestCase):
def verify_aot_autograd(self, f, inp):
if isinstance(f, nn.Module):
compiled_f = aot_module(f, nop)
else:
compiled_f = aot_function(f, nop)
ref_out, ref_grad = _outs_and_grads(f, inp)
test_out, test_grad = _outs_and_grads(compiled_f, inp)
self.assertEqual(ref_out, test_out)
self.assertEqual(ref_grad, test_grad)
def test_single_output(self):
def f(a, b):
return a + b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output(self):
def f(a, b):
return a + b, a - b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output_list(self):
def f(a, b):
return [a + b, a - b]
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_no_grad_input_output(self):
def f(a, b):
return a.cos(), b.cos(), a * b
inp_thunks = [lambda: torch.randn(5, requires_grad=True), lambda: torch.randn(5, requires_grad=False)]
for inps in itertools.product(inp_thunks, repeat=2):
inps = [i() for i in inps]
self.verify_aot_autograd(f, inps)
def test_inner_grad(self):
def foo(x):
y = torch.exp(x)
z =
|
torch
|
torch.autograd.grad(y, x)
|
return z
inps = [torch.randn((), requires_grad=True)]
self.verify_aot_autograd(foo, inps)
|
torch
|
torch.autograd.grad
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn as nn
import torch.utils._pytree as pytree
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
class TestAOTAutograd(TestCase):
def verify_aot_autograd(self, f, inp):
if isinstance(f, nn.Module):
compiled_f = aot_module(f, nop)
else:
compiled_f = aot_function(f, nop)
ref_out, ref_grad = _outs_and_grads(f, inp)
test_out, test_grad = _outs_and_grads(compiled_f, inp)
self.assertEqual(ref_out, test_out)
self.assertEqual(ref_grad, test_grad)
def test_single_output(self):
def f(a, b):
return a + b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output(self):
def f(a, b):
return a + b, a - b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output_list(self):
def f(a, b):
return [a + b, a - b]
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_no_grad_input_output(self):
def f(a, b):
return a.cos(), b.cos(), a * b
inp_thunks = [lambda: torch.randn(5, requires_grad=True), lambda: torch.randn(5, requires_grad=False)]
for inps in itertools.product(inp_thunks, repeat=2):
inps = [i() for i in inps]
self.verify_aot_autograd(f, inps)
def test_inner_grad(self):
def foo(x):
y = torch.exp(x)
z = torch.autograd.grad(y, x)
return z
inps = [torch.randn((), requires_grad=True)]
self.verify_aot_autograd(foo, inps)
def test_grad_context(self):
def foo(x):
return x * 2
inps = [torch.randn((), requires_grad=True)]
graph_size = None
def assert_graph_empty(fx_g, _):
nonlocal graph_size
graph_size = len(fx_g.graph.nodes)
return fx_g
start_recompilations = num_of_recompilations()
f = aot_function(foo, nop, assert_graph_empty)
with torch.set_grad_enabled(False):
f(*inps)
self.assertEqual(graph_size, 2)
with torch.set_grad_enabled(True):
f(*inps)
self.assertTrue(graph_size > 2)
self.assertEqual(num_of_recompilations() - start_recompilations, 2)
def test_output_dict(self):
def f(x):
return {'a': x, 'b': x}
inp = [torch.randn(3, 3, requires_grad=True)]
self.verify_aot_autograd(f, inp)
def f(x, y):
return {'a': x, 'b': y + x}
inp = [torch.randn(3, requires_grad=True), torch.randn(3)]
self.verify_aot_autograd(f, inp)
def f(x):
new_d = {}
for k in x:
new_d[k] = x[k] * 2
return new_d
inp = [{'a': torch.randn(3, requires_grad=True), 'b': torch.randn(3, requires_grad=True)}]
self.verify_aot_autograd(f, inp)
def test_module(self):
mod =
|
nn
|
nn.Sequential(nn.Linear(32, 32), nn.ReLU())
|
compiled_mod = compiled_module(mod, nop, nop)
inp = torch.randn(32, 32)
ref_out = mod(inp)
ref_out.sum().backward()
|
torch
|
torch.nn.Sequential
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn as nn
import torch.utils._pytree as pytree
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
class TestAOTAutograd(TestCase):
def verify_aot_autograd(self, f, inp):
if isinstance(f, nn.Module):
compiled_f = aot_module(f, nop)
else:
compiled_f = aot_function(f, nop)
ref_out, ref_grad = _outs_and_grads(f, inp)
test_out, test_grad = _outs_and_grads(compiled_f, inp)
self.assertEqual(ref_out, test_out)
self.assertEqual(ref_grad, test_grad)
def test_single_output(self):
def f(a, b):
return a + b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output(self):
def f(a, b):
return a + b, a - b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output_list(self):
def f(a, b):
return [a + b, a - b]
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_no_grad_input_output(self):
def f(a, b):
return a.cos(), b.cos(), a * b
inp_thunks = [lambda: torch.randn(5, requires_grad=True), lambda: torch.randn(5, requires_grad=False)]
for inps in itertools.product(inp_thunks, repeat=2):
inps = [i() for i in inps]
self.verify_aot_autograd(f, inps)
def test_inner_grad(self):
def foo(x):
y = torch.exp(x)
z = torch.autograd.grad(y, x)
return z
inps = [torch.randn((), requires_grad=True)]
self.verify_aot_autograd(foo, inps)
def test_grad_context(self):
def foo(x):
return x * 2
inps = [torch.randn((), requires_grad=True)]
graph_size = None
def assert_graph_empty(fx_g, _):
nonlocal graph_size
graph_size = len(fx_g.graph.nodes)
return fx_g
start_recompilations = num_of_recompilations()
f = aot_function(foo, nop, assert_graph_empty)
with torch.set_grad_enabled(False):
f(*inps)
self.assertEqual(graph_size, 2)
with torch.set_grad_enabled(True):
f(*inps)
self.assertTrue(graph_size > 2)
self.assertEqual(num_of_recompilations() - start_recompilations, 2)
def test_output_dict(self):
def f(x):
return {'a': x, 'b': x}
inp = [torch.randn(3, 3, requires_grad=True)]
self.verify_aot_autograd(f, inp)
def f(x, y):
return {'a': x, 'b': y + x}
inp = [torch.randn(3, requires_grad=True), torch.randn(3)]
self.verify_aot_autograd(f, inp)
def f(x):
new_d = {}
for k in x:
new_d[k] = x[k] * 2
return new_d
inp = [{'a': torch.randn(3, requires_grad=True), 'b': torch.randn(3, requires_grad=True)}]
self.verify_aot_autograd(f, inp)
def test_module(self):
mod = nn.Sequential(nn.Linear(32, 32), nn.ReLU())
compiled_mod = compiled_module(mod, nop, nop)
inp = torch.randn(32, 32)
ref_out = mod(inp)
ref_out.sum().backward()
ref_grads = sorted([(name, p.grad) for name, p in mod.named_parameters()])
out = compiled_mod(inp)
out.sum().backward()
grads = sorted([(name, p.grad) for name, p in mod.named_parameters()])
self.assertEqual((out, grads), (ref_out, ref_grads))
def test_batchnorm(self):
mod = compiled_module(nn.BatchNorm2d(4), nop, nop)
x =
|
torch
|
torch.ones(1, 4, 2, 2)
|
mod(x).sum().backward()
class TestEagerFusionOpInfo(TestCase):
|
torch
|
torch.ones
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn as nn
import torch.utils._pytree as pytree
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
class TestEagerFusionOpInfo(TestCase):
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestEagerFusionOpInfo', 'test_aot_autograd_exhaustive', {
xfail('__rmatmul__'),
xfail('linalg.cholesky'),
xfail('matmul'),
skip('msort'),
xfail('nn.functional.linear'),
xfail('nn.functional.dropout'),
xfail('polar'),
xfail('special.zeta', 'grad'),
xfail('to_sparse'),
xfail('addcdiv'),
xfail('cholesky'),
xfail('cumulative_trapezoid'),
xfail('diag_embed'),
xfail('linalg.householder_product'),
xfail('logit'),
xfail('matrix_exp'),
xfail('trapezoid'),
xfail('trapz'),
xfail('trace'),
skip('nn.functional.binary_cross_entropy_with_logits') # seems to fail sometimes?
})
def test_aot_autograd_exhaustive(self, device, dtype, op):
def f(args, kwargs):
return op.op(*args, **kwargs)
if not op.supports_autograd:
return
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=True)
for sample_input in sample_inputs_itr:
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
if not all([isinstance(i, torch.Tensor) and i.dtype == torch.float for i in args]):
self.skipTest("not all inputs are float tensors")
if not all([isinstance(i, torch.Tensor) and i.dtype == torch.float for i in kwargs.values()]):
self.skipTest("not all inputs are float tensors")
continue
t = f(args, kwargs)
if isinstance(t, tuple):
self.skipTest("output is a tuple")
continue
def reset_grads():
def f(x):
x.grad = None
pytree.tree_map(f, args)
def get_grads(args):
return pytree.tree_map(lambda x: x.grad, args)
compiled_f = compiled_function(f, nop, nop)
reset_grads()
compiled_f(args, kwargs).sum().backward()
compiled_grad = get_grads(args)
reset_grads()
f(args, kwargs).sum().backward()
orig_grad = get_grads(args)
self.assertEqual(orig_grad, compiled_grad)
def create_new_arg(x):
return x.detach().uniform_(0, 1).requires_grad_(x.requires_grad)
args =
|
pytree
|
pytree.tree_map(create_new_arg, args)
|
reset_grads()
compiled_f(args, kwargs).sum().backward()
compiled_grad = get_grads(args)
|
torch
|
torch.utils._pytree.tree_map
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn as nn
import torch.utils._pytree as pytree
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
class TestPartitioning(TestCase):
@unittest.skipIf(not USE_NETWORKX, "networkx not available")
def test_recompute_partitioning(self):
def fn(a, b):
return torch.sin(torch.sin(a)) + b
ref_a =
|
torch
|
torch.rand(10, 10, requires_grad=True)
|
ref_b = torch.rand(10, 10, requires_grad=True)
ref = fn(ref_a, ref_b)
ref.sum().backward()
|
torch
|
torch.rand
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets =
|
torch
|
torch.randint(0, C, (N,), device=device)
|
def foo(y, targets):
return F.cross_entropy(y, targets)
|
torch
|
torch.randint
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x =
|
torch
|
torch.tensor([1., 2., 3.], device=device)
|
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
|
torch
|
torch.tensor
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 =
|
torch
|
torch.cos(y)
|
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
|
torch
|
torch.cos
|
torch_direct_api
|
v_1_10_0
|
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 = torch.cos(y)
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
self.assertEqual(grads[0], -x.sin())
self.assertEqual(grads[1], -y.sin())
def test_view_inplace_simple(self, device):
def foo(x):
x = x.clone()
x.view([]).sin_()
return x
x = torch.randn([], requires_grad=True, device=device)
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=-3)(x, y)
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=2)(x, y)
with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
grad(torch.mul, argnums=[0])(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be int'):
grad(torch.mul, argnums=('0',))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, 0))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=0)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_out_of_order_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gy, gx = grad(torch.mul, argnums=(1, 0))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_negative_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=-2)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_grad_pytree_inputs(self, device):
x = torch.randn([], device=device)
def f(a, b):
x, y = a
return 1 * x + 2 * y + 3 * b['foo']
args = ((x, x), {'foo': x})
gx, gy = grad(f)(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), = grad(f, argnums=(0,))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
self.assertEqual(gz['foo'], torch.tensor(3., device=device))
def test_grad_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
def f(t):
y = t.sin()
return y.sum(), t.cos()
out, aux = grad(f, has_aux=True)(x)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.cos())
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, aux = grad(f, has_aux=True)(x)
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
return (x['a']**2.0).sum()
inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
grads = grad(f)(inps)
self.assertNotEqual(grads['a'].sum(), 0.0)
self.assertEqual(grads['b'].sum(), 0.0)
def test_unrelated_grad(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
def unrelated(x):
return y
result = grad(unrelated)(x)
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(x):
return y
out, vjp_fn = vjp(unrelated, x)
result = vjp_fn(v)
expected = (
| code_str: torch | target: torch.zeros_like(x) | right_context_few_lines:
,)
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
w = torch.tensor(3., device=device)
| library: torch | api: torch.zeros_like | group: torch_direct_api | version: v_1_10_0 |
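For reference, a minimal standalone sketch of torch.zeros_like, the API this row targets; the input tensor below is illustrative and not taken from the prompt above.

import torch

x = torch.tensor([1.0, 2.0, 3.0])
# torch.zeros_like returns a zero-filled tensor with the same shape,
# dtype and device as its argument.
z = torch.zeros_like(x)
print(z)  # tensor([0., 0., 0.])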
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import functorch  # used by the tests below (functorch._C.dlevel)
from functorch import grad, jacrev, vjp  # transforms exercised by these tests
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 = torch.cos(y)
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
self.assertEqual(grads[0], -x.sin())
self.assertEqual(grads[1], -y.sin())
def test_view_inplace_simple(self, device):
def foo(x):
x = x.clone()
x.view([]).sin_()
return x
x = torch.randn([], requires_grad=True, device=device)
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=-3)(x, y)
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=2)(x, y)
with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
grad(torch.mul, argnums=[0])(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be int'):
grad(torch.mul, argnums=('0',))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, 0))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=0)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_out_of_order_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gy, gx = grad(torch.mul, argnums=(1, 0))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_negative_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=-2)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_grad_pytree_inputs(self, device):
x = torch.randn([], device=device)
def f(a, b):
x, y = a
return 1 * x + 2 * y + 3 * b['foo']
args = ((x, x), {'foo': x})
gx, gy = grad(f)(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), = grad(f, argnums=(0,))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
self.assertEqual(gz['foo'], torch.tensor(3., device=device))
def test_grad_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
def f(t):
y = t.sin()
return y.sum(), t.cos()
out, aux = grad(f, has_aux=True)(x)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.cos())
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, aux = grad(f, has_aux=True)(x)
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
return (x['a']**2.0).sum()
inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
grads = grad(f)(inps)
self.assertNotEqual(grads['a'].sum(), 0.0)
self.assertEqual(grads['b'].sum(), 0.0)
def test_unrelated_grad(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
def unrelated(x):
return y
result = grad(unrelated)(x)
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(x):
return y
out, vjp_fn = vjp(unrelated, x)
result = vjp_fn(v)
expected = (torch.zeros_like(x),)
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
w = torch.tensor(3., device=device)
x = torch.tensor(4., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(w, x):
return y, y, x
out, vjp_fn = vjp(unrelated, w, x)
result = vjp_fn((v, v, v))
expected = (torch.zeros_like(x), torch.ones_like(x))
self.assertEqual(result, expected)
@onlyCPU
def test_unrelated_hessian(self, device):
N = 5
M = 3
W = torch.randn(N, M, device=device)
def f(x):
return W @ x
x = torch.randn(M)
result = jacrev(jacrev(f))(x)
expected =
| code_str: torch | target: torch.zeros(N, M, M, device=device) | right_context_few_lines:
self.assertEqual(result, expected)
def test_vjp_pytree_input(self, device):
def f(x):
| library: torch | api: torch.zeros | group: torch_direct_api | version: v_1_10_0 |
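A minimal sketch of torch.zeros, the API this row targets. The (N, M, M) shape mirrors the expected Hessian shape in the prompt; the concrete sizes below are illustrative.

import torch

N, M = 5, 3
# torch.zeros builds a tensor of the requested shape filled with zeros;
# dtype and device can be passed explicitly if needed.
expected = torch.zeros(N, M, M)
print(expected.shape)  # torch.Size([5, 3, 3])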
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import functorch  # used by the tests below (functorch._C.dlevel)
from functorch import grad, jacrev, vjp  # transforms exercised by these tests
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 = torch.cos(y)
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
self.assertEqual(grads[0], -x.sin())
self.assertEqual(grads[1], -y.sin())
def test_view_inplace_simple(self, device):
def foo(x):
x = x.clone()
x.view([]).sin_()
return x
x = torch.randn([], requires_grad=True, device=device)
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=-3)(x, y)
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=2)(x, y)
with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
grad(torch.mul, argnums=[0])(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be int'):
grad(torch.mul, argnums=('0',))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, 0))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=0)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_out_of_order_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gy, gx = grad(torch.mul, argnums=(1, 0))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_negative_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=-2)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_grad_pytree_inputs(self, device):
x = torch.randn([], device=device)
def f(a, b):
x, y = a
return 1 * x + 2 * y + 3 * b['foo']
args = ((x, x), {'foo': x})
gx, gy = grad(f)(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), = grad(f, argnums=(0,))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
self.assertEqual(gz['foo'], torch.tensor(3., device=device))
def test_grad_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
def f(t):
y = t.sin()
return y.sum(), t.cos()
out, aux = grad(f, has_aux=True)(x)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.cos())
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, aux = grad(f, has_aux=True)(x)
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
return (x['a']**2.0).sum()
inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
grads = grad(f)(inps)
self.assertNotEqual(grads['a'].sum(), 0.0)
self.assertEqual(grads['b'].sum(), 0.0)
def test_unrelated_grad(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
def unrelated(x):
return y
result = grad(unrelated)(x)
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(x):
return y
out, vjp_fn = vjp(unrelated, x)
result = vjp_fn(v)
expected = (torch.zeros_like(x),)
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
w = torch.tensor(3., device=device)
x = torch.tensor(4., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(w, x):
return y, y, x
out, vjp_fn = vjp(unrelated, w, x)
result = vjp_fn((v, v, v))
expected = (torch.zeros_like(x), torch.ones_like(x))
self.assertEqual(result, expected)
@onlyCPU
def test_unrelated_hessian(self, device):
N = 5
M = 3
W = torch.randn(N, M, device=device)
def f(x):
return W @ x
x = torch.randn(M)
result = jacrev(jacrev(f))(x)
expected = torch.zeros(N, M, M, device=device)
self.assertEqual(result, expected)
def test_vjp_pytree_input(self, device):
def f(x):
return x[0] * x[1][0]
x = torch.randn([], device=device)
v = torch.randn([], device=device)
out, vjp_fn = vjp(f, (x, (x, x)))
self.assertEqual(out, x * x)
result = vjp_fn(v)
self.assertEqual(result, ((x * v, (x * v, 0.)),))
def test_vjp_pytree_output(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
result, = vjp_fn((v1, (v2, v3)))
self.assertEqual(result, v1 + v2 + v3)
def test_vjp_outputs_can_any_pytree(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): Expected f to be a function that has non-empty output"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): expected f\(\*primals\) to return only tensors"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
output, vjp_fn = vjp(lambda x: [x, x.sum()], x)
vjp_out, = vjp_fn([t, t.sum()])
assert isinstance(output, list) and len(output) == 2
assert isinstance(vjp_out, torch.Tensor)
output, vjp_fn = vjp(lambda x: {"x": x, "xsum": x.sum()}, x)
vjp_out, = vjp_fn({"x": t, "xsum": t.sum()})
assert isinstance(output, dict) and len(output) == 2 and "xsum" in output
assert isinstance(vjp_out, torch.Tensor)
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
output, vjp_fn = vjp(composite_output, x)
vjp_out, = vjp_fn([(t.sum(), {"a": t, "out": [t, t.sum()]}), ])
assert isinstance(output, list)
assert isinstance(output[0], tuple) and isinstance(output[0][1], dict)
assert isinstance(vjp_out, torch.Tensor)
def test_vjp_pytree_error(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
with self.assertRaisesRegex(RuntimeError, 'Expected pytree structure'):
result, = vjp_fn(((v1, (v2, v3)),))
def test_vjp_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: [t, t], x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: (t, t + 2, t + 3), x, has_aux=True)
def f(t):
y = t.sin()
return y, t.cos()
out, vjp_fn, aux = vjp(f, x, has_aux=True)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.sin())
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
def test_vjp_aux_pytree(self, device):
def f(x):
y = x.sin()
return y, {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, vjp_fn, aux = vjp(f, x, has_aux=True)
expected_out, expected_aux = f(x)
self.assertEqual(out, expected_out)
self.assertEqual(aux, expected_aux)
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, aux), x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, [x, aux]), x, has_aux=True)
def test_functional_init(self, device):
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 =
| code_str: nn | target: nn.Linear(2, self.hidden_dim) | right_context_few_lines:
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
| library: torch | api: torch.nn.Linear | group: torch_direct_api | version: v_1_10_0 |
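A minimal sketch of nn.Linear, the API this row targets; the layer and batch sizes below are illustrative rather than taken from MLPClassifier.

import torch
import torch.nn as nn

# nn.Linear(in_features, out_features) applies y = x @ W.T + b.
fc = nn.Linear(2, 32)
x = torch.randn(4, 2)  # batch of 4 samples with 2 features each
y = fc(x)
print(y.shape)  # torch.Size([4, 32])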
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import functorch  # used by the tests below (functorch._C.dlevel)
from functorch import grad, jacrev, vjp  # transforms exercised by these tests
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 = torch.cos(y)
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
self.assertEqual(grads[0], -x.sin())
self.assertEqual(grads[1], -y.sin())
def test_view_inplace_simple(self, device):
def foo(x):
x = x.clone()
x.view([]).sin_()
return x
x = torch.randn([], requires_grad=True, device=device)
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=-3)(x, y)
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=2)(x, y)
with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
grad(torch.mul, argnums=[0])(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be int'):
grad(torch.mul, argnums=('0',))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, 0))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=0)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_out_of_order_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gy, gx = grad(torch.mul, argnums=(1, 0))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_negative_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=-2)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_grad_pytree_inputs(self, device):
x = torch.randn([], device=device)
def f(a, b):
x, y = a
return 1 * x + 2 * y + 3 * b['foo']
args = ((x, x), {'foo': x})
gx, gy = grad(f)(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), = grad(f, argnums=(0,))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
self.assertEqual(gz['foo'], torch.tensor(3., device=device))
def test_grad_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
def f(t):
y = t.sin()
return y.sum(), t.cos()
out, aux = grad(f, has_aux=True)(x)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.cos())
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, aux = grad(f, has_aux=True)(x)
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
return (x['a']**2.0).sum()
inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
grads = grad(f)(inps)
self.assertNotEqual(grads['a'].sum(), 0.0)
self.assertEqual(grads['b'].sum(), 0.0)
def test_unrelated_grad(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
def unrelated(x):
return y
result = grad(unrelated)(x)
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(x):
return y
out, vjp_fn = vjp(unrelated, x)
result = vjp_fn(v)
expected = (torch.zeros_like(x),)
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
w = torch.tensor(3., device=device)
x = torch.tensor(4., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(w, x):
return y, y, x
out, vjp_fn = vjp(unrelated, w, x)
result = vjp_fn((v, v, v))
expected = (torch.zeros_like(x), torch.ones_like(x))
self.assertEqual(result, expected)
@onlyCPU
def test_unrelated_hessian(self, device):
N = 5
M = 3
W = torch.randn(N, M, device=device)
def f(x):
return W @ x
x = torch.randn(M)
result = jacrev(jacrev(f))(x)
expected = torch.zeros(N, M, M, device=device)
self.assertEqual(result, expected)
def test_vjp_pytree_input(self, device):
def f(x):
return x[0] * x[1][0]
x = torch.randn([], device=device)
v = torch.randn([], device=device)
out, vjp_fn = vjp(f, (x, (x, x)))
self.assertEqual(out, x * x)
result = vjp_fn(v)
self.assertEqual(result, ((x * v, (x * v, 0.)),))
def test_vjp_pytree_output(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
result, = vjp_fn((v1, (v2, v3)))
self.assertEqual(result, v1 + v2 + v3)
def test_vjp_outputs_can_any_pytree(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): Expected f to be a function that has non-empty output"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): expected f\(\*primals\) to return only tensors"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
output, vjp_fn = vjp(lambda x: [x, x.sum()], x)
vjp_out, = vjp_fn([t, t.sum()])
assert isinstance(output, list) and len(output) == 2
assert isinstance(vjp_out, torch.Tensor)
output, vjp_fn = vjp(lambda x: {"x": x, "xsum": x.sum()}, x)
vjp_out, = vjp_fn({"x": t, "xsum": t.sum()})
assert isinstance(output, dict) and len(output) == 2 and "xsum" in output
assert isinstance(vjp_out, torch.Tensor)
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
output, vjp_fn = vjp(composite_output, x)
vjp_out, = vjp_fn([(t.sum(), {"a": t, "out": [t, t.sum()]}), ])
assert isinstance(output, list)
assert isinstance(output[0], tuple) and isinstance(output[0][1], dict)
assert isinstance(vjp_out, torch.Tensor)
def test_vjp_pytree_error(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
with self.assertRaisesRegex(RuntimeError, 'Expected pytree structure'):
result, = vjp_fn(((v1, (v2, v3)),))
def test_vjp_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: [t, t], x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: (t, t + 2, t + 3), x, has_aux=True)
def f(t):
y = t.sin()
return y, t.cos()
out, vjp_fn, aux = vjp(f, x, has_aux=True)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.sin())
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
def test_vjp_aux_pytree(self, device):
def f(x):
y = x.sin()
return y, {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, vjp_fn, aux = vjp(f, x, has_aux=True)
expected_out, expected_aux = f(x)
self.assertEqual(out, expected_out)
self.assertEqual(aux, expected_aux)
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, aux), x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, [x, aux]), x, has_aux=True)
def test_functional_init(self, device):
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x =
| code_str: F | target: F.relu(x) | right_context_few_lines:
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
| library: torch | api: torch.nn.functional.relu | group: torch_direct_api | version: v_1_10_0 |
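A minimal sketch of F.relu, the API this row targets, applied to a standalone tensor.

import torch
import torch.nn.functional as F

x = torch.tensor([-1.0, 0.0, 2.0])
# F.relu clamps negative entries to zero and leaves the rest unchanged.
print(F.relu(x))  # tensor([0., 0., 2.])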
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import functorch  # used by the tests below (functorch._C.dlevel)
from functorch import grad, jacrev, vjp  # transforms exercised by these tests
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 = torch.cos(y)
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
self.assertEqual(grads[0], -x.sin())
self.assertEqual(grads[1], -y.sin())
def test_view_inplace_simple(self, device):
def foo(x):
x = x.clone()
x.view([]).sin_()
return x
x = torch.randn([], requires_grad=True, device=device)
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=-3)(x, y)
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=2)(x, y)
with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
grad(torch.mul, argnums=[0])(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be int'):
grad(torch.mul, argnums=('0',))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, 0))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=0)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_out_of_order_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gy, gx = grad(torch.mul, argnums=(1, 0))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_negative_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=-2)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_grad_pytree_inputs(self, device):
x = torch.randn([], device=device)
def f(a, b):
x, y = a
return 1 * x + 2 * y + 3 * b['foo']
args = ((x, x), {'foo': x})
gx, gy = grad(f)(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), = grad(f, argnums=(0,))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
self.assertEqual(gz['foo'], torch.tensor(3., device=device))
def test_grad_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
def f(t):
y = t.sin()
return y.sum(), t.cos()
out, aux = grad(f, has_aux=True)(x)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.cos())
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, aux = grad(f, has_aux=True)(x)
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
return (x['a']**2.0).sum()
inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
grads = grad(f)(inps)
self.assertNotEqual(grads['a'].sum(), 0.0)
self.assertEqual(grads['b'].sum(), 0.0)
def test_unrelated_grad(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
def unrelated(x):
return y
result = grad(unrelated)(x)
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(x):
return y
out, vjp_fn = vjp(unrelated, x)
result = vjp_fn(v)
expected = (torch.zeros_like(x),)
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
w = torch.tensor(3., device=device)
x = torch.tensor(4., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(w, x):
return y, y, x
out, vjp_fn = vjp(unrelated, w, x)
result = vjp_fn((v, v, v))
expected = (torch.zeros_like(x), torch.ones_like(x))
self.assertEqual(result, expected)
@onlyCPU
def test_unrelated_hessian(self, device):
N = 5
M = 3
W = torch.randn(N, M, device=device)
def f(x):
return W @ x
x = torch.randn(M)
result = jacrev(jacrev(f))(x)
expected = torch.zeros(N, M, M, device=device)
self.assertEqual(result, expected)
def test_vjp_pytree_input(self, device):
def f(x):
return x[0] * x[1][0]
x = torch.randn([], device=device)
v = torch.randn([], device=device)
out, vjp_fn = vjp(f, (x, (x, x)))
self.assertEqual(out, x * x)
result = vjp_fn(v)
self.assertEqual(result, ((x * v, (x * v, 0.)),))
def test_vjp_pytree_output(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
result, = vjp_fn((v1, (v2, v3)))
self.assertEqual(result, v1 + v2 + v3)
def test_vjp_outputs_can_any_pytree(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): Expected f to be a function that has non-empty output"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): expected f\(\*primals\) to return only tensors"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
output, vjp_fn = vjp(lambda x: [x, x.sum()], x)
vjp_out, = vjp_fn([t, t.sum()])
assert isinstance(output, list) and len(output) == 2
assert isinstance(vjp_out, torch.Tensor)
output, vjp_fn = vjp(lambda x: {"x": x, "xsum": x.sum()}, x)
vjp_out, = vjp_fn({"x": t, "xsum": t.sum()})
assert isinstance(output, dict) and len(output) == 2 and "xsum" in output
assert isinstance(vjp_out, torch.Tensor)
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
output, vjp_fn = vjp(composite_output, x)
vjp_out, = vjp_fn([(t.sum(), {"a": t, "out": [t, t.sum()]}), ])
assert isinstance(output, list)
assert isinstance(output[0], tuple) and isinstance(output[0][1], dict)
assert isinstance(vjp_out, torch.Tensor)
def test_vjp_pytree_error(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
with self.assertRaisesRegex(RuntimeError, 'Expected pytree structure'):
result, = vjp_fn(((v1, (v2, v3)),))
def test_vjp_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: [t, t], x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: (t, t + 2, t + 3), x, has_aux=True)
def f(t):
y = t.sin()
return y, t.cos()
out, vjp_fn, aux = vjp(f, x, has_aux=True)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.sin())
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
def test_vjp_aux_pytree(self, device):
def f(x):
y = x.sin()
return y, {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, vjp_fn, aux = vjp(f, x, has_aux=True)
expected_out, expected_aux = f(x)
self.assertEqual(out, expected_out)
self.assertEqual(aux, expected_aux)
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, aux), x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, [x, aux]), x, has_aux=True)
def test_functional_init(self, device):
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x =
| code_str: F | target: F.log_softmax(x, -1) | right_context_few_lines:
return x
B = 10
weights, fn, _ = functional_init(MLPClassifier, (B,), device=device)(32, 2)
| library: torch | api: torch.nn.functional.log_softmax | group: torch_direct_api | version: v_1_10_0 |
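A minimal sketch of F.log_softmax, the API this row targets; dim=-1 normalizes over the last dimension, matching the call in the prompt, and the logits below are illustrative.

import torch
import torch.nn.functional as F

logits = torch.randn(4, 2)
# log_softmax computes log(softmax(x)) in a numerically stable way;
# exponentiating each row recovers probabilities that sum to 1.
log_probs = F.log_softmax(logits, dim=-1)
print(log_probs.exp().sum(dim=-1))  # approximately tensor([1., 1., 1., 1.])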
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest
)
import functorch  # used by the tests below (functorch._C.dlevel)
from functorch import grad, jacrev, vjp  # transforms exercised by these tests
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1+1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 = torch.cos(y)
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
self.assertEqual(grads[0], -x.sin())
self.assertEqual(grads[1], -y.sin())
def test_view_inplace_simple(self, device):
def foo(x):
x = x.clone()
x.view([]).sin_()
return x
x = torch.randn([], requires_grad=True, device=device)
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=-3)(x, y)
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=2)(x, y)
with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
grad(torch.mul, argnums=[0])(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be int'):
grad(torch.mul, argnums=('0',))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, 0))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=0)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_out_of_order_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gy, gx = grad(torch.mul, argnums=(1, 0))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_negative_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=-2)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_grad_pytree_inputs(self, device):
x = torch.randn([], device=device)
def f(a, b):
x, y = a
return 1 * x + 2 * y + 3 * b['foo']
args = ((x, x), {'foo': x})
gx, gy = grad(f)(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), = grad(f, argnums=(0,))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
self.assertEqual(gz['foo'], torch.tensor(3., device=device))
def test_grad_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
def f(t):
y = t.sin()
return y.sum(), t.cos()
out, aux = grad(f, has_aux=True)(x)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.cos())
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, aux = grad(f, has_aux=True)(x)
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
return (x['a']**2.0).sum()
inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
grads = grad(f)(inps)
self.assertNotEqual(grads['a'].sum(), 0.0)
self.assertEqual(grads['b'].sum(), 0.0)
def test_unrelated_grad(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
def unrelated(x):
return y
result = grad(unrelated)(x)
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(x):
return y
out, vjp_fn = vjp(unrelated, x)
result = vjp_fn(v)
expected = (torch.zeros_like(x),)
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
w = torch.tensor(3., device=device)
x = torch.tensor(4., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(w, x):
return y, y, x
out, vjp_fn = vjp(unrelated, w, x)
result = vjp_fn((v, v, v))
expected = (torch.zeros_like(x), torch.ones_like(x))
self.assertEqual(result, expected)
@onlyCPU
def test_unrelated_hessian(self, device):
N = 5
M = 3
W = torch.randn(N, M, device=device)
def f(x):
return W @ x
x = torch.randn(M)
result = jacrev(jacrev(f))(x)
expected = torch.zeros(N, M, M, device=device)
self.assertEqual(result, expected)
def test_vjp_pytree_input(self, device):
def f(x):
return x[0] * x[1][0]
x = torch.randn([], device=device)
v = torch.randn([], device=device)
out, vjp_fn = vjp(f, (x, (x, x)))
self.assertEqual(out, x * x)
result = vjp_fn(v)
self.assertEqual(result, ((x * v, (x * v, 0.)),))
def test_vjp_pytree_output(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
result, = vjp_fn((v1, (v2, v3)))
self.assertEqual(result, v1 + v2 + v3)
def test_vjp_outputs_can_any_pytree(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): Expected f to be a function that has non-empty output"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): expected f\(\*primals\) to return only tensors"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
output, vjp_fn = vjp(lambda x: [x, x.sum()], x)
vjp_out, = vjp_fn([t, t.sum()])
assert isinstance(output, list) and len(output) == 2
assert isinstance(vjp_out, torch.Tensor)
output, vjp_fn = vjp(lambda x: {"x": x, "xsum": x.sum()}, x)
vjp_out, = vjp_fn({"x": t, "xsum": t.sum()})
assert isinstance(output, dict) and len(output) == 2 and "xsum" in output
assert isinstance(vjp_out, torch.Tensor)
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
output, vjp_fn = vjp(composite_output, x)
vjp_out, = vjp_fn([(t.sum(), {"a": t, "out": [t, t.sum()]}), ])
assert isinstance(output, list)
assert isinstance(output[0], tuple) and isinstance(output[0][1], dict)
assert isinstance(vjp_out, torch.Tensor)
def test_vjp_pytree_error(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
with self.assertRaisesRegex(RuntimeError, 'Expected pytree structure'):
result, = vjp_fn(((v1, (v2, v3)),))
def test_vjp_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: [t, t], x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: (t, t + 2, t + 3), x, has_aux=True)
def f(t):
y = t.sin()
return y, t.cos()
out, vjp_fn, aux = vjp(f, x, has_aux=True)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.sin())
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
def test_vjp_aux_pytree(self, device):
def f(x):
y = x.sin()
return y, {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, vjp_fn, aux = vjp(f, x, has_aux=True)
expected_out, expected_aux = f(x)
self.assertEqual(out, expected_out)
self.assertEqual(aux, expected_aux)
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, aux), x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, [x, aux]), x, has_aux=True)
def test_functional_init(self, device):
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
B = 10
weights, fn, _ = functional_init(MLPClassifier, (B,), device=device)(32, 2)
inputs = torch.randn(B, 7, 2, device=device)
vmap(fn)(weights, (inputs,))
def test_functional_init_with_buffers(self, device):
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.bn =
| nn | nn.BatchNorm1d(self.hidden_dim, affine=True) |
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
| torch | torch.nn.BatchNorm1d |
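A minimal sketch of torch.nn.BatchNorm1d, the target of this row; the feature width of 32 and batch size of 8 are arbitrary choices for illustration.

```python
import torch
import torch.nn as nn

# BatchNorm1d normalizes each of the 32 features across the batch dimension;
# affine=True (also the default) adds learnable scale and shift parameters.
bn = nn.BatchNorm1d(32, affine=True)

x = torch.randn(8, 32)             # batch of 8 samples, 32 features each
y = bn(x)                          # in training mode, per-feature mean ~0, var ~1
print(y.mean(dim=0).abs().max())   # close to zero
```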
| torch_direct_api | v_1_10_0 |
from torch.testing._internal.common_utils import (
    TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestVmapOfGrad(TestCase):
def test_per_sample_grads_inplace_view(self, device):
def compute_loss(weight, x, t):
x = x.mm(weight)
y = x.squeeze_(0)
return (y - t).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 1, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected =
| torch | torch.stack(expected) |
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_new_zeros_materializes_tensor(self, device):
| torch | torch.stack |
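A minimal sketch of torch.stack, the call this row completes; the per-sample gradients below are stand-in tensors, not values from the test.

```python
import torch

# torch.stack joins equally shaped tensors along a new leading dimension,
# e.g. turning a Python list of per-sample gradients into one batched tensor.
per_sample = [torch.randn(16, 2) for _ in range(4)]
batched = torch.stack(per_sample)
print(batched.shape)  # torch.Size([4, 16, 2])
```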
| torch_direct_api | v_1_10_0 |
from torch.testing._internal.common_utils import (
    TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestVmapOfGrad(TestCase):
def test_per_sample_grads_inplace_view(self, device):
def compute_loss(weight, x, t):
x = x.mm(weight)
y = x.squeeze_(0)
return (y - t).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 1, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_new_zeros_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_zeros((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_new_empty_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_empty((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_per_sample_grads_simple(self, device):
def compute_loss(weight, x, t):
y = x @ weight
return ((y - t) ** 2).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_per_sample_grads_embeddingnet(self, device):
class SampleNet(nn.Module):
def __init__(self, vocab_size: int):
super().__init__()
self.emb =
| nn | nn.Embedding(vocab_size, 16) |
self.fc1 = nn.Linear(16, 16)
self.fc2 = nn.Linear(16, 2)
def forward(self, x):
| torch | torch.nn.Embedding |
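A minimal sketch of torch.nn.Embedding as targeted by this row; the vocabulary size, sentence length, and batch size are illustrative.

```python
import torch
import torch.nn as nn

# An embedding table with 1000 rows (one per vocabulary entry), each of width 16.
emb = nn.Embedding(1000, 16)

# Inputs are integer token ids; the lookup appends the embedding dimension.
tokens = torch.randint(0, 1000, (8, 5))  # 8 sentences, 5 tokens each
vectors = emb(tokens)
print(vectors.shape)  # torch.Size([8, 5, 16])
```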
| torch_direct_api | v_1_10_0 |
from torch.testing._internal.common_utils import (
    TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestVmapOfGrad(TestCase):
def test_per_sample_grads_inplace_view(self, device):
def compute_loss(weight, x, t):
x = x.mm(weight)
y = x.squeeze_(0)
return (y - t).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 1, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_new_zeros_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_zeros((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_new_empty_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_empty((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_per_sample_grads_simple(self, device):
def compute_loss(weight, x, t):
y = x @ weight
return ((y - t) ** 2).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_per_sample_grads_embeddingnet(self, device):
class SampleNet(nn.Module):
def __init__(self, vocab_size: int):
super().__init__()
self.emb = nn.Embedding(vocab_size, 16)
self.fc1 = nn.Linear(16, 16)
self.fc2 = nn.Linear(16, 2)
def forward(self, x):
x = self.emb(x)
x =
| torch | torch.transpose(x, -1, -2) |
x = torch.mean(x, -1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
| torch | torch.transpose |
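A minimal sketch of torch.transpose, this row's target; the input shape is invented for illustration.

```python
import torch

# torch.transpose swaps exactly two dimensions; negative indices count from
# the end, so (-1, -2) swaps the last two dims as in the record above.
x = torch.randn(8, 5, 16)
y = torch.transpose(x, -1, -2)
print(y.shape)  # torch.Size([8, 16, 5])
```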
| torch_direct_api | v_1_10_0 |
from torch.testing._internal.common_utils import (
    TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestVmapOfGrad(TestCase):
def test_per_sample_grads_inplace_view(self, device):
def compute_loss(weight, x, t):
x = x.mm(weight)
y = x.squeeze_(0)
return (y - t).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 1, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_new_zeros_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_zeros((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_new_empty_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_empty((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_per_sample_grads_simple(self, device):
def compute_loss(weight, x, t):
y = x @ weight
return ((y - t) ** 2).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_per_sample_grads_embeddingnet(self, device):
class SampleNet(nn.Module):
def __init__(self, vocab_size: int):
super().__init__()
self.emb = nn.Embedding(vocab_size, 16)
self.fc1 = nn.Linear(16, 16)
self.fc2 = nn.Linear(16, 2)
def forward(self, x):
x = self.emb(x)
x = torch.transpose(x, -1, -2)
x =
| torch | torch.mean(x, -1) |
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
| torch | torch.mean |
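A minimal sketch of torch.mean with a dim argument, the completion in this row; the shape is illustrative (e.g. pooling embeddings over token positions).

```python
import torch

# torch.mean with a dim argument reduces along that dimension only;
# here the last dim is averaged away.
x = torch.randn(8, 16, 5)
pooled = torch.mean(x, -1)
print(pooled.shape)  # torch.Size([8, 16])
```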
| torch_direct_api | v_1_10_0 |
from torch.testing._internal.common_utils import (
    TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestVmapOfGrad(TestCase):
def test_per_sample_grads_inplace_view(self, device):
def compute_loss(weight, x, t):
x = x.mm(weight)
y = x.squeeze_(0)
return (y - t).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 1, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_new_zeros_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_zeros((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_new_empty_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_empty((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_per_sample_grads_simple(self, device):
def compute_loss(weight, x, t):
y = x @ weight
return ((y - t) ** 2).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_per_sample_grads_embeddingnet(self, device):
class SampleNet(nn.Module):
def __init__(self, vocab_size: int):
super().__init__()
self.emb = nn.Embedding(vocab_size, 16)
self.fc1 = nn.Linear(16, 16)
self.fc2 = nn.Linear(16, 2)
def forward(self, x):
x = self.emb(x)
x = torch.transpose(x, -1, -2)
x = torch.mean(x, -1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
def name(self):
return "SampleNet"
vocab_size = 1000
batch_shape = [64]
words_per_sentence = 5
data = torch.randint(0, vocab_size, (*batch_shape, words_per_sentence), device=device)
targets = torch.randint(0, 1, (*batch_shape,), device=device)
net = SampleNet(vocab_size).to(device=device)
criterion =
| nn | nn.CrossEntropyLoss() |
net_func, weights = make_functional(net)
def compute_loss(weights, data, target):
| torch | torch.nn.CrossEntropyLoss |
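A minimal sketch of torch.nn.CrossEntropyLoss, this row's target; the batch size and class count are illustrative.

```python
import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()

# CrossEntropyLoss expects raw, unnormalized scores (logits) and integer
# class targets; it applies log_softmax and negative log-likelihood internally.
logits = torch.randn(8, 2)            # 8 samples, 2 classes
targets = torch.randint(0, 2, (8,))   # one class index per sample
loss = criterion(logits, targets)
print(loss)  # a scalar tensor
```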
| torch_direct_api | v_1_10_0 |
from torch.testing._internal.common_utils import (
    TestCase, run_tests, parametrize, subtest
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
class TestVmapOfGrad(TestCase):
def test_per_sample_grads_inplace_view(self, device):
def compute_loss(weight, x, t):
x = x.mm(weight)
y = x.squeeze_(0)
return (y - t).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 1, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_new_zeros_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_zeros((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_new_empty_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_empty((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_per_sample_grads_simple(self, device):
def compute_loss(weight, x, t):
y = x @ weight
return ((y - t) ** 2).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_per_sample_grads_embeddingnet(self, device):
class SampleNet(nn.Module):
def __init__(self, vocab_size: int):
super().__init__()
self.emb = nn.Embedding(vocab_size, 16)
self.fc1 = nn.Linear(16, 16)
self.fc2 = nn.Linear(16, 2)
def forward(self, x):
x = self.emb(x)
x = torch.transpose(x, -1, -2)
x = torch.mean(x, -1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
def name(self):
return "SampleNet"
vocab_size = 1000
batch_shape = [64]
words_per_sentence = 5
data = torch.randint(0, vocab_size, (*batch_shape, words_per_sentence), device=device)
targets = torch.randint(0, 1, (*batch_shape,), device=device)
net = SampleNet(vocab_size).to(device=device)
criterion = nn.CrossEntropyLoss()
net_func, weights = make_functional(net)
def compute_loss(weights, data, target):
output = net_func(weights, data)
result = criterion(output, target)
return result
expected = [grad(compute_loss)(weights, data[i], targets[i]) for i in range(64)]
expected = zip(*expected)
expected = tuple(torch.stack(shards) for shards in expected)
result = vmap(partial(grad(compute_loss), weights))(data, targets)
for r, e in zip(result, expected):
self.assertEqual(r, e, atol=0, rtol=1e-4)
def test_log_softmax(self, device):
x = torch.randn(3, 5, device=device)
v = torch.randn(5, device=device)
def foo(x, v):
_, vjp_fn = vjp(partial(torch.log_softmax, dim=-1), x)
return vjp_fn(v)[0]
result = vmap(foo, (0, None))(x, v)
v = v.expand_as(x)
x.requires_grad_()
output =
| torch | torch.log_softmax(x, dim=-1) |
output.backward(v)
self.assertEqual(result, x.grad)
| torch | torch.log_softmax |
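A minimal sketch of the top-level torch.log_softmax (the namespace twin of torch.nn.functional.log_softmax); the shapes are illustrative, and the backward call mirrors the autograd check in the test above.

```python
import torch

# torch.log_softmax behaves like torch.nn.functional.log_softmax and
# participates in autograd like any other differentiable op.
x = torch.randn(3, 5, requires_grad=True)
out = torch.log_softmax(x, dim=-1)
out.sum().backward()
print(x.grad.shape)  # torch.Size([3, 5])
```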