mirror of
https://github.com/Azure/MachineLearningNotebooks.git
synced 2025-12-21 18:15:13 -05:00
quickstart added
tutorials/get-started-day1/code/hello/hello.py (new file)
@@ -0,0 +1,2 @@
print("hello world!")
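For context: in the get-started-day1 tutorial, hello.py is executed remotely by submitting it from a small control script. Below is a minimal sketch of that pattern, assuming a workspace config.json has been downloaded and a compute cluster named "cpu-cluster" exists; both are assumptions, not part of this commit.

# --- sketch (not part of the commit): submit hello.py as an experiment run ---
from azureml.core import Workspace, Experiment, ScriptRunConfig

ws = Workspace.from_config()                      # reads config.json from the working directory
experiment = Experiment(workspace=ws, name="day1-experiment-hello")
config = ScriptRunConfig(
    source_directory="./code/hello",              # folder containing hello.py
    script="hello.py",
    compute_target="cpu-cluster",                 # hypothetical cluster name
)
run = experiment.submit(config)
print(run.get_portal_url())                       # link to monitor the run in Azure ML studio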
@@ -0,0 +1,22 @@
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
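The file above is model.py (the training scripts in this commit import it via `from model import Net`). As a quick sanity check, not part of the commit: for 32x32 CIFAR-10 inputs, the two conv+pool stages shrink the feature map to 16 channels of 5x5 (32 -> 28 -> 14 -> 10 -> 5), which is why fc1 takes 16 * 5 * 5 inputs, and the network emits 10 class scores.

# --- sketch (not part of the commit): verify Net's input/output shapes ---
import torch
from model import Net

net = Net()
x = torch.randn(4, 3, 32, 32)   # batch of 4 CIFAR-10-sized RGB images
out = net(x)
print(out.shape)                # torch.Size([4, 10]) -- one score per class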
@@ -0,0 +1,62 @@
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

from model import Net
from azureml.core import Run


# ADDITIONAL CODE: get AML run from the current context
run = Run.get_context()

# download CIFAR 10 data
trainset = torchvision.datasets.CIFAR10(
    root='./data',
    train=True,
    download=True,
    transform=torchvision.transforms.ToTensor()
)
trainloader = torch.utils.data.DataLoader(
    trainset,
    batch_size=4,
    shuffle=True,
    num_workers=2
)

if __name__ == "__main__":

    # define convolutional network
    net = Net()

    # set up pytorch loss / optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # train the network
    for epoch in range(2):

        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # unpack the data
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:
                loss = running_loss / 2000
                # ADDITIONAL CODE: log loss metric to AML
                run.log('loss', loss)
                print(f'epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}')
                running_loss = 0.0

    print('Finished Training')
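Because of Run.get_context(), this script behaves the same locally and on Azure ML: outside a submitted run it returns an offline stand-in, so run.log() does not fail. After a submitted run finishes, the logged series can be read back from the run handle returned by experiment.submit(); a sketch, assuming the control script kept that handle:

# --- sketch (not part of the commit): read back the logged metric ---
run.wait_for_completion(show_output=True)   # stream logs until the run finishes
metrics = run.get_metrics()                 # e.g. {'loss': [2.21, 1.87, ...]}
print(metrics.get('loss'))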
@@ -0,0 +1,22 @@
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
@@ -0,0 +1,52 @@
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

from model import Net

# download CIFAR 10 data
trainset = torchvision.datasets.CIFAR10(
    root="./data",
    train=True,
    download=True,
    transform=torchvision.transforms.ToTensor(),
)
trainloader = torch.utils.data.DataLoader(
    trainset, batch_size=4, shuffle=True, num_workers=2
)

if __name__ == "__main__":

    # define convolutional network
    net = Net()

    # set up pytorch loss / optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # train the network
    for epoch in range(2):

        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # unpack the data
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:
                loss = running_loss / 2000
                print(f"epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}")
                running_loss = 0.0

    print("Finished Training")
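This variant drops the Azure ML logging and runs anywhere PyTorch is installed: executed directly, it downloads CIFAR-10 into ./data and trains for 2 epochs. Submitting it to a remote target additionally needs an environment that carries torch and torchvision; a sketch of the control-script pattern, with illustrative file, folder, and cluster names:

# --- sketch (not part of the commit): attach a conda environment to the run ---
from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig

ws = Workspace.from_config()
config = ScriptRunConfig(
    source_directory="./code/pytorch-cifar10",    # hypothetical folder holding model.py + the training script
    script="train.py",                            # illustrative script name
    compute_target="cpu-cluster",                 # hypothetical cluster name
)

# pytorch-env.yml is an illustrative conda file listing python, pip, torch, torchvision
env = Environment.from_conda_specification(
    name="pytorch-env", file_path="./pytorch-env.yml"
)
config.run_config.environment = env

run = Experiment(ws, "day1-experiment-train").submit(config)
print(run.get_portal_url())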
@@ -0,0 +1,22 @@
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
@@ -0,0 +1,96 @@
import os
import argparse
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

from model import Net
from azureml.core import Run

run = Run.get_context()

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data_path',
        type=str,
        help='Path to the training data'
    )
    parser.add_argument(
        '--learning_rate',
        type=float,
        default=0.001,
        help='Learning rate for SGD'
    )
    parser.add_argument(
        '--momentum',
        type=float,
        default=0.9,
        help='Momentum for SGD'
    )

    args = parser.parse_args()

    print("===== DATA =====")
    print("DATA PATH: " + args.data_path)
    print("LIST FILES IN DATA PATH...")
    print(os.listdir(args.data_path))
    print("================")

    # prepare DataLoader for CIFAR10 data
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    trainset = torchvision.datasets.CIFAR10(
        root=args.data_path,
        train=True,
        download=False,
        transform=transform,
    )
    trainloader = torch.utils.data.DataLoader(
        trainset,
        batch_size=4,
        shuffle=True,
        num_workers=2
    )

    # define convolutional network
    net = Net()

    # set up pytorch loss / optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.SGD(
        net.parameters(),
        lr=args.learning_rate,
        momentum=args.momentum,
    )

    # train the network
    for epoch in range(2):

        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # unpack the data
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:
                loss = running_loss / 2000
                run.log('loss', loss)  # log loss metric to AML
                print(f'epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}')
                running_loss = 0.0

    print('Finished Training')
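The --data_path argument exists so a control script can feed in mounted data: download=False means this script expects CIFAR-10 to already sit at that path. A sketch of wiring a FileDataset and the two hyperparameters through ScriptRunConfig arguments, where the datastore path, source folder, script name, and cluster name are all assumptions:

# --- sketch (not part of the commit): pass a mounted dataset and hyperparameters ---
from azureml.core import Workspace, Experiment, Dataset, ScriptRunConfig

ws = Workspace.from_config()
datastore = ws.get_default_datastore()
dataset = Dataset.File.from_files(path=(datastore, "datasets/cifar10"))  # illustrative path

config = ScriptRunConfig(
    source_directory="./code/pytorch-cifar10",           # hypothetical folder
    script="train.py",                                   # illustrative script name
    compute_target="cpu-cluster",                        # hypothetical cluster name
    arguments=[
        "--data_path", dataset.as_named_input("input").as_mount(),
        "--learning_rate", 0.003,
        "--momentum", 0.92,
    ],
)

run = Experiment(ws, "day1-experiment-data").submit(config)
print(run.get_portal_url())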