This is a lot of files changed! Don't panic! Here's how it works:

* Previously, we set `follow_imports = silent` for our mypy.ini configuration. Per https://mypy.readthedocs.io/en/stable/running_mypy.html#follow-imports, what this does is whenever we have an import to a module which is not listed as a file to be typechecked in mypy, we typecheck it as normal but suppress all errors that occurred in that file.
* When mypy is run inside lintrunner, the list of files is precisely the files covered by the glob in lintrunner.toml, but with files in excludes excluded.
* The top-level directive `# mypy: ignore-errors` instructs mypy to typecheck the file as normal, but ignore all errors.
* Therefore, it should be equivalent to set `follow_imports = normal`, if we put `# mypy: ignore-errors` on all files that were previously excluded from the file list.
* Having done this, we can remove the exclude list from .lintrunner.toml, since excluding a file from typechecking is baked into the files themselves.
* torch/_dynamo and torch/_inductor were previously in the exclude list, because they were covered by MYPYINDUCTOR. It is not OK to mark these as `# mypy: ignore-errors` as this will impede typechecking on the alternate configuration. So they are temporarily being checked twice, but I am suppressing the errors in these files as the configurations are not quite the same. I plan to unify the configurations so this is only a temporary state.
* There were some straggler type errors after these changes somehow, so I fixed them as needed. There weren't that many.

In the future, to start type checking a file, just remove the ignore-errors directive from the top of the file.

The codemod was done with this script authored by GPT-4:

```
import glob

exclude_patterns = [
    ...
]

for pattern in exclude_patterns:
    for filepath in glob.glob(pattern, recursive=True):
        if filepath.endswith('.py'):
            with open(filepath, 'r+') as f:
                content = f.read()
                f.seek(0, 0)
                f.write('# mypy: ignore-errors\n\n' + content)
```

Signed-off-by: Edward Z. Yang <ezyang@meta.com>

Pull Request resolved: https://github.com/pytorch/pytorch/pull/118414
Approved by: https://github.com/thiagocrepaldi, https://github.com/albanD
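For a concrete illustration of the mechanism described above, here is a minimal sketch (a hypothetical module written for this note, not taken from the PR): with `follow_imports = normal`, mypy still analyzes the file, but the top-level directive suppresses every error it would otherwise report. This is exactly the directive that appears at the top of the file below.

```
# mypy: ignore-errors
# Hypothetical module, used only to illustrate the directive.


def add(x: int, y: int) -> int:
    # mypy sees the int/str mismatch here, but reports nothing for this
    # file because of the ignore-errors directive at the top.
    return str(x + y)
```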
# mypy: ignore-errors

import os

import torch
import torch._lazy
import torch._lazy.metrics
import torch._lazy.ts_backend
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms

# Register and initialize the TorchScript-based lazy tensor backend.
torch._lazy.ts_backend.init()


class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)
        output = F.log_softmax(x, dim=1)
        return output


def train(log_interval, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad(set_to_none=True)
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        # Cut the trace and execute the graph accumulated for this step.
        torch._lazy.mark_step()

        if batch_idx % log_interval == 0:
            print(
                "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
                    epoch,
                    batch_idx * len(data),
                    len(train_loader.dataset),
                    100.0 * batch_idx / len(train_loader),
                    loss.item(),
                )
            )


if __name__ == "__main__":
    bsz = 64
    # Tensors placed on the "lazy" device are traced rather than executed eagerly.
    device = "lazy"
    epochs = 14
    log_interval = 10
    lr = 1
    gamma = 0.7
    train_kwargs = {"batch_size": bsz}
    # if we want to use CUDA
    if "LTC_TS_CUDA" in os.environ:
        cuda_kwargs = {
            "num_workers": 1,
            "pin_memory": True,
            "shuffle": True,
            "batch_size": bsz,
        }
        train_kwargs.update(cuda_kwargs)

    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    )
    dataset1 = datasets.MNIST("./data", train=True, download=True, transform=transform)
    train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=gamma)
    for epoch in range(1, epochs + 1):
        train(log_interval, model, device, train_loader, optimizer, epoch)
        scheduler.step()
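Note that `torch._lazy.metrics` is imported by the example but never used. As a hedged follow-up sketch (assuming this build exposes `counter_names()` on `torch._lazy.metrics`, which the example itself does not demonstrate), one way to confirm that the lazy backend is actually tracing and executing graphs is:

```
# Hypothetical snippet, not part of the original example.
# Assumes torch._lazy.metrics.counter_names() is available in this build.
import torch
import torch._lazy
import torch._lazy.metrics
import torch._lazy.ts_backend

torch._lazy.ts_backend.init()

x = torch.randn(2, 2, device="lazy")
y = (x @ x).sum()
torch._lazy.mark_step()  # force execution of the traced graph

# Lazy tensor core counters should now be populated.
print(torch._lazy.metrics.counter_names())
```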