quickstart added
This commit is contained in:
12
tutorials/get-started-day1/IDE-users/01-create-workspace.py
Normal file
12
tutorials/get-started-day1/IDE-users/01-create-workspace.py
Normal file
@@ -0,0 +1,12 @@
|
||||
# 01-create-workspace.py
"""Create an Azure ML workspace and cache its connection details locally."""
from azureml.core import Workspace

# Example locations: 'westeurope' or 'eastus2' or 'westus2' or 'southeastasia'.
ws = Workspace.create(
    name='<my_workspace_name>',
    subscription_id='<azure-subscription-id>',
    resource_group='<myresourcegroup>',
    create_resource_group=True,
    location='<NAME_OF_REGION>',
)

# Persist the workspace details to .azureml/config.json so the follow-up
# scripts can reconnect with Workspace.from_config().
ws.write_config(path='.azureml')
|
||||
23
tutorials/get-started-day1/IDE-users/02-create-compute.py
Normal file
23
tutorials/get-started-day1/IDE-users/02-create-compute.py
Normal file
@@ -0,0 +1,23 @@
|
||||
# 02-create-compute.py
"""Provision (or reuse) the AmlCompute CPU cluster used by the tutorials."""
from azureml.core import Workspace
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

ws = Workspace.from_config()

# Name under which the cluster is registered in the workspace.
cpu_cluster_name = "cpu-cluster"

# Reuse the cluster when it already exists; otherwise provision a new one.
try:
    cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    compute_config = AmlCompute.provisioning_configuration(
        vm_size='STANDARD_D2_V2',
        max_nodes=4,
        idle_seconds_before_scaledown=2400,
    )
    cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)

cpu_cluster.wait_for_completion(show_output=True)
|
||||
13
tutorials/get-started-day1/IDE-users/03-run-hello.py
Normal file
13
tutorials/get-started-day1/IDE-users/03-run-hello.py
Normal file
@@ -0,0 +1,13 @@
|
||||
# 03-run-hello.py
"""Submit src/hello.py as a remote run on the cpu-cluster."""
from azureml.core import Workspace, Experiment, ScriptRunConfig

ws = Workspace.from_config()
experiment = Experiment(workspace=ws, name='day1-experiment-hello')

# Everything under ./src is snapshotted and shipped to the compute target.
config = ScriptRunConfig(
    source_directory='./src',
    script='hello.py',
    compute_target='cpu-cluster',
)

run = experiment.submit(config)
aml_url = run.get_portal_url()
print(aml_url)
|
||||
24
tutorials/get-started-day1/IDE-users/04-run-pytorch.py
Normal file
24
tutorials/get-started-day1/IDE-users/04-run-pytorch.py
Normal file
@@ -0,0 +1,24 @@
|
||||
# 04-run-pytorch.py
"""Submit src/train.py on the cpu-cluster inside the PyTorch conda env."""
from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig

if __name__ == "__main__":
    ws = Workspace.from_config()
    experiment = Experiment(workspace=ws, name='day1-experiment-train')
    config = ScriptRunConfig(
        source_directory='./src',
        script='train.py',
        compute_target='cpu-cluster',
    )

    # set up pytorch environment
    env = Environment.from_conda_specification(
        name='pytorch-env',
        file_path='./environments/pytorch-env.yml',
    )
    config.run_config.environment = env

    run = experiment.submit(config)

    aml_url = run.get_portal_url()
    print(aml_url)
|
||||
7
tutorials/get-started-day1/IDE-users/05-upload-data.py
Normal file
7
tutorials/get-started-day1/IDE-users/05-upload-data.py
Normal file
@@ -0,0 +1,7 @@
|
||||
# 05-upload-data.py
"""Upload the local ./data folder to the workspace's default datastore."""
from azureml.core import Workspace

ws = Workspace.from_config()
datastore = ws.get_default_datastore()
# Overwrite any previous copy under datasets/cifar10 on the datastore.
datastore.upload(
    src_dir='./data',
    target_path='datasets/cifar10',
    overwrite=True,
)
|
||||
35
tutorials/get-started-day1/IDE-users/06-run-pytorch-data.py
Normal file
35
tutorials/get-started-day1/IDE-users/06-run-pytorch-data.py
Normal file
@@ -0,0 +1,35 @@
|
||||
# 06-run-pytorch-data.py
"""Submit the training script with the CIFAR-10 dataset mounted as input."""
from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig
from azureml.core import Dataset

if __name__ == "__main__":
    ws = Workspace.from_config()
    datastore = ws.get_default_datastore()
    # File dataset pointing at the folder uploaded by 05-upload-data.py.
    dataset = Dataset.File.from_files(path=(datastore, 'datasets/cifar10'))

    experiment = Experiment(workspace=ws, name='day1-experiment-data')

    # The mounted dataset path and hyperparameters are passed as CLI args.
    config = ScriptRunConfig(
        source_directory='./src',
        script='train.py',
        compute_target='cpu-cluster',
        arguments=[
            '--data_path', dataset.as_named_input('input').as_mount(),
            '--learning_rate', 0.003,
            '--momentum', 0.92],
    )
    # set up pytorch environment
    env = Environment.from_conda_specification(
        name='pytorch-env',
        file_path='./environments/pytorch-env.yml',
    )
    config.run_config.environment = env

    run = experiment.submit(config)
    aml_url = run.get_portal_url()
    print("Submitted to compute cluster. Click link below")
    print("")
    print(aml_url)
|
||||
25
tutorials/get-started-day1/IDE-users/README.md
Normal file
25
tutorials/get-started-day1/IDE-users/README.md
Normal file
@@ -0,0 +1,25 @@
|
||||
# Get Started (day 1) with Azure Machine Learning: IDE Users
|
||||
|
||||
This folder has been set up for IDE users (for example, VS Code or PyCharm) following the [Get started (day 1) with Azure Machine Learning tutorial series](https://aka.ms/day1aml).
|
||||
|
||||
The directory is structured as follows:
|
||||
|
||||
```Text
|
||||
IDE-users
|
||||
└──environments
|
||||
| └──pytorch-env.yml
|
||||
└──src
|
||||
| └──hello.py
|
||||
| └──model.py
|
||||
| └──train.py
|
||||
└──01-create-workspace.py
|
||||
└──02-create-compute.py
|
||||
└──03-run-hello.py
|
||||
└──04-run-pytorch.py
|
||||
└──05-upload-data.py
|
||||
└──06-run-pytorch-data.py
|
||||
```
|
||||
|
||||
Please refer to [the documentation](https://aka.ms/day1aml) for more details on these files.
|
||||
|
||||

|
||||
@@ -0,0 +1,9 @@
|
||||
|
||||
name: pytorch-env
|
||||
channels:
|
||||
- defaults
|
||||
- pytorch
|
||||
dependencies:
|
||||
- python=3.6.2
|
||||
- pytorch
|
||||
- torchvision
|
||||
2
tutorials/get-started-day1/IDE-users/src/hello.py
Normal file
2
tutorials/get-started-day1/IDE-users/src/hello.py
Normal file
@@ -0,0 +1,2 @@
|
||||
|
||||
print("hello world!")
|
||||
22
tutorials/get-started-day1/IDE-users/src/model.py
Normal file
22
tutorials/get-started-day1/IDE-users/src/model.py
Normal file
@@ -0,0 +1,22 @@
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
|
||||
|
||||
class Net(nn.Module):
    """Small convolutional classifier: two conv/pool stages, three FC layers.

    The 16 * 5 * 5 flatten size implies a 3-channel 32x32 input (CIFAR-10
    sized images); the final layer emits 10 class scores.
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: 5x5 convolutions, each followed by 2x2 max-pool.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Classifier head over the flattened 16x5x5 feature map.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Map a (N, 3, 32, 32) batch to (N, 10) unnormalized class scores."""
        features = self.pool(F.relu(self.conv1(x)))
        features = self.pool(F.relu(self.conv2(features)))
        flat = features.view(-1, 16 * 5 * 5)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
|
||||
52
tutorials/get-started-day1/IDE-users/src/train.py
Normal file
52
tutorials/get-started-day1/IDE-users/src/train.py
Normal file
@@ -0,0 +1,52 @@
|
||||
"""Train the Net classifier on CIFAR-10 for two epochs with SGD."""
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

from model import Net

# Fetch the CIFAR-10 training split (downloaded to ./data on first use)
# and wrap it in a shuffling DataLoader.
trainset = torchvision.datasets.CIFAR10(
    root="./data",
    train=True,
    download=True,
    transform=transforms.ToTensor(),
)
trainloader = torch.utils.data.DataLoader(
    trainset, batch_size=4, shuffle=True, num_workers=2
)

if __name__ == "__main__":

    # Convolutional network, cross-entropy loss, plain SGD with momentum.
    net = Net()
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # Two full passes over the training data.
    for epoch in range(2):

        running_loss = 0.0
        for i, data in enumerate(trainloader):
            inputs, labels = data

            # Standard step: reset grads, forward, backward, update weights.
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Report the mean loss over each window of 2000 mini-batches.
            running_loss += loss.item()
            if i % 2000 == 1999:
                loss = running_loss / 2000
                print(f"epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}")
                running_loss = 0.0

    print("Finished Training")
|
||||
Reference in New Issue
Block a user