mirror of https://github.com/Azure/MachineLearningNotebooks.git (synced 2025-12-20 17:45:10 -05:00)
quickstart added
tutorials/get-started-day1/IDE-users/src/train.py (new normal file, 52 lines added)
@@ -0,0 +1,52 @@
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

from model import Net

# download CIFAR 10 data
trainset = torchvision.datasets.CIFAR10(
    root="./data",
    train=True,
    download=True,
    transform=torchvision.transforms.ToTensor(),
)
trainloader = torch.utils.data.DataLoader(
    trainset, batch_size=4, shuffle=True, num_workers=2
)

if __name__ == "__main__":

    # define convolutional network
    net = Net()

    # set up pytorch loss / optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # train the network
    for epoch in range(2):

        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # unpack the data
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:
                loss = running_loss / 2000
                print(f"epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}")
                running_loss = 0.0

    print("Finished Training")
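The script imports Net from a sibling model.py that is not part of this diff. For orientation only, here is a minimal sketch of what such a module could look like, assuming the small two-convolution CIFAR-10 network from the standard PyTorch tutorial; the layer sizes below are assumptions, not taken from this commit.

import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    # assumed layout: two conv/pool stages followed by three fully connected
    # layers, mapping 3x32x32 CIFAR-10 images to 10 class scores
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))  # 3x32x32 -> 6x14x14
        x = self.pool(F.relu(self.conv2(x)))  # 6x14x14 -> 16x5x5
        x = x.view(-1, 16 * 5 * 5)            # flatten for the linear layers
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

With train.py and a model.py like this side by side in src/, running python train.py from that directory downloads CIFAR-10 into ./data and trains for two epochs, printing the average loss every 2000 mini-batches. The __main__ guard keeps the DataLoader's num_workers=2 worker processes from re-executing the training loop on platforms that spawn rather than fork.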