Update samples from Release-86 as part of the 1.28.0 SDK stable release

This commit is contained in:
amlrelsa-ms
2021-05-10 18:01:38 +00:00
parent 441a5b0141
commit a243beee5e
117 changed files with 451 additions and 2252 deletions

View File

@@ -16,16 +16,14 @@ The following tutorials are intended to provide an introductory overview of Azur
| Tutorial | Description | Notebook | Task | Framework |
| --- | --- | --- | --- | --- |
| Azure Machine Learning in 10 minutes | Learn how to create and attach compute instances to notebooks, run an image classification model, track model metrics, and deploy a model | [quickstart](quickstart/azureml-quickstart.ipynb) | Learn Azure Machine Learning Concepts | PyTorch
| [Get Started (day1)](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup-local) | Learn the fundamental concepts of Azure Machine Learning to help onboard your existing code to Azure Machine Learning. This tutorial focuses heavily on submitting machine learning jobs to scalable cloud-based compute clusters. | [get-started-day1](get-started-day1/day1-part1-setup.ipynb) | Learn Azure Machine Learning Concepts | PyTorch
| [Train your first ML Model](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-train) | Learn the foundational design patterns in Azure Machine Learning and train a scikit-learn model based on a diabetes data set. | [tutorial-quickstart-train-model.ipynb](create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb) | Regression | Scikit-Learn
| [Train an image classification model](https://docs.microsoft.com/azure/machine-learning/tutorial-train-models-with-aml) | Train a scikit-learn image classification model. | [img-classification-part1-training.ipynb](image-classification-mnist-data/img-classification-part1-training.ipynb) | Image Classification | Scikit-Learn
| [Deploy an image classification model](https://docs.microsoft.com/azure/machine-learning/tutorial-deploy-models-with-aml) | Deploy a scikit-learn image classification model to Azure Container Instances. | [img-classification-part2-deploy.ipynb](image-classification-mnist-data/img-classification-part2-deploy.ipynb) | Image Classification | Scikit-Learn
| [Deploy an encrypted inferencing service](https://docs.microsoft.com/azure/machine-learning/tutorial-deploy-models-with-aml) | Deploy an image classification model for encrypted inferencing in Azure Container Instances. | [img-classification-part3-deploy-encrypted.ipynb](image-classification-mnist-data/img-classification-part3-deploy-encrypted.ipynb) | Image Classification | Scikit-Learn
| [Use automated machine learning to predict taxi fares](https://docs.microsoft.com/azure/machine-learning/tutorial-auto-train-models) | Train a regression model to predict taxi fares using Automated Machine Learning. | [regression-part2-automated-ml.ipynb](regression-automl-nyc-taxi-data/regression-automated-ml.ipynb) | Regression | Automated ML
| Azure ML in 10 minutes, to be run on a Compute Instance | Learn how to run an image classification model, track model metrics, and deploy a model in 10 minutes. | [AzureMLIn10mins.ipynb](quickstart-ci/AzureMLIn10mins.ipynb) | Image Classification | Scikit-Learn |
| Get started with Azure ML Job Submission, to be run on a Compute Instance | Learn how to use the Azure Machine Learning Python SDK to submit batch jobs. | [GettingStartedWithPythonSDK.ipynb](quickstart-ci/GettingStartedWithPythonSDK.ipynb) | Image Classification | Scikit-Learn |
| Get started with Automated ML, to be run on a Compute Instance | Learn how to use Automated ML for Fraud classification. | [ClassificationWithAutomatedML.ipynb](quickstart-ci/ClassificationWithAutomatedML.ipynb) | Classification | Automated ML |
| Azure ML in 10 minutes (Compute instance required) | Learn how to run an image classification model, track model metrics, and deploy a model in 10 minutes. | [quickstart-azureml-in-10mins.ipynb](compute-instance-quickstarts/quickstart-azureml-in-10mins/quickstart-azureml-in-10mins.ipynb) | Image Classification | Scikit-Learn |
| Get started with Azure ML Job Submission (Compute instance required) | Learn how to use the Azure Machine Learning Python SDK to submit batch jobs. | [quickstart-azureml-python-sdk.ipynb](compute-instance-quickstarts/quickstart-azureml-python-sdk/quickstart-azureml-python-sdk.ipynb) | Image Classification | Scikit-Learn |
| Get started with Automated ML (Compute instance required) | Learn how to use Automated ML for Fraud classification. | [quickstart-azureml-automl.ipynb](compute-instance-quickstarts/quickstart-azureml-automl/quickstart-azureml-automl.ipynb) | Classification | Automated ML |
## Advanced Samples

View File

@@ -488,18 +488,11 @@
"pygments_lexer": "ipython3",
"version": "3.6.9"
},
"microsoft": {
"host": {
"AzureML": {
"notebookHasBeenCompleted": true
}
}
},
"notice": "Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.",
"nteract": {
"version": "nteract-front-end@1.0.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

View File

@@ -1,4 +1,4 @@
name: day1-part1-setup
name: quickstart-azureml-automl
dependencies:
- pip:
- azureml-sdk

View File

@@ -625,7 +625,7 @@
"\n",
"Now that you have working code in a development environment, learn how to submit a **_job_** - ideally on a schedule or trigger (for example, arrival of new data).\n",
"\n",
" [**Learn how to get started with Azure ML Job Submission**](GettingStartedWithPythonSDK.ipynb) "
" [**Learn how to get started with Azure ML Job Submission**](../quickstart-azureml-python-sdk/quickstart-azureml-python-sdk.ipynb) "
]
}
],
@@ -637,7 +637,7 @@
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python36",
"language": "python",
"name": "python36"
},
"language_info": {
@@ -650,14 +650,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
},
"microsoft": {
"host": {
"AzureML": {
"notebookHasBeenCompleted": true
}
}
"version": "3.6.9"
},
"notice": "Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.",
"nteract": {
@@ -665,5 +658,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

View File

@@ -1,4 +1,4 @@
name: GettingStartedWithPythonSDK
name: quickstart-azureml-in-10mins
dependencies:
- pip:
- azureml-sdk

View File

@@ -67,17 +67,16 @@
},
"outputs": [],
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"\n",
"import azureml.core\n",
"from azureml.core import Workspace\n",
"from azureml.core import Experiment\n",
"\n",
"# connect to your workspace\n",
"ws = Workspace.from_config()\n",
"\n",
"experiment_name = \"get-started-with-jobsubmission-tutorial\"\n",
"import numpy as np\r\n",
"import matplotlib.pyplot as plt\r\n",
"\r\n",
"from azureml.core import Workspace\r\n",
"from azureml.core import Experiment\r\n",
"\r\n",
"# connect to your workspace\r\n",
"ws = Workspace.from_config()\r\n",
"\r\n",
"experiment_name = \"get-started-with-jobsubmission-tutorial\"\r\n",
"exp = Experiment(workspace=ws, name=experiment_name)"
]
},
@@ -175,55 +174,55 @@
},
"outputs": [],
"source": [
"# make sure utils.py is in the same directory as this code\n",
"from utils import load_data\n",
"import glob\n",
"\n",
"\n",
"# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the model converge faster.\n",
"X_train = (\n",
" load_data(\n",
" glob.glob(\n",
" os.path.join(data_folder, \"**/train-images-idx3-ubyte.gz\"), recursive=True\n",
" )[0],\n",
" False,\n",
" )\n",
" / 255.0\n",
")\n",
"X_test = (\n",
" load_data(\n",
" glob.glob(\n",
" os.path.join(data_folder, \"**/t10k-images-idx3-ubyte.gz\"), recursive=True\n",
" )[0],\n",
" False,\n",
" )\n",
" / 255.0\n",
")\n",
"y_train = load_data(\n",
" glob.glob(\n",
" os.path.join(data_folder, \"**/train-labels-idx1-ubyte.gz\"), recursive=True\n",
" )[0],\n",
" True,\n",
").reshape(-1)\n",
"y_test = load_data(\n",
" glob.glob(\n",
" os.path.join(data_folder, \"**/t10k-labels-idx1-ubyte.gz\"), recursive=True\n",
" )[0],\n",
" True,\n",
").reshape(-1)\n",
"\n",
"\n",
"# now let's show some randomly chosen images from the training set.\n",
"count = 0\n",
"sample_size = 30\n",
"plt.figure(figsize=(16, 6))\n",
"for i in np.random.permutation(X_train.shape[0])[:sample_size]:\n",
" count = count + 1\n",
" plt.subplot(1, sample_size, count)\n",
" plt.axhline(\"\")\n",
" plt.axvline(\"\")\n",
" plt.text(x=10, y=-10, s=y_train[i], fontsize=18)\n",
" plt.imshow(X_train[i].reshape(28, 28), cmap=plt.cm.Greys)\n",
"# make sure utils.py is in the same directory as this code\r\n",
"from src.utils import load_data\r\n",
"import glob\r\n",
"\r\n",
"\r\n",
"# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the model converge faster.\r\n",
"X_train = (\r\n",
" load_data(\r\n",
" glob.glob(\r\n",
" os.path.join(data_folder, \"**/train-images-idx3-ubyte.gz\"), recursive=True\r\n",
" )[0],\r\n",
" False,\r\n",
" )\r\n",
" / 255.0\r\n",
")\r\n",
"X_test = (\r\n",
" load_data(\r\n",
" glob.glob(\r\n",
" os.path.join(data_folder, \"**/t10k-images-idx3-ubyte.gz\"), recursive=True\r\n",
" )[0],\r\n",
" False,\r\n",
" )\r\n",
" / 255.0\r\n",
")\r\n",
"y_train = load_data(\r\n",
" glob.glob(\r\n",
" os.path.join(data_folder, \"**/train-labels-idx1-ubyte.gz\"), recursive=True\r\n",
" )[0],\r\n",
" True,\r\n",
").reshape(-1)\r\n",
"y_test = load_data(\r\n",
" glob.glob(\r\n",
" os.path.join(data_folder, \"**/t10k-labels-idx1-ubyte.gz\"), recursive=True\r\n",
" )[0],\r\n",
" True,\r\n",
").reshape(-1)\r\n",
"\r\n",
"\r\n",
"# now let's show some randomly chosen images from the training set.\r\n",
"count = 0\r\n",
"sample_size = 30\r\n",
"plt.figure(figsize=(16, 6))\r\n",
"for i in np.random.permutation(X_train.shape[0])[:sample_size]:\r\n",
" count = count + 1\r\n",
" plt.subplot(1, sample_size, count)\r\n",
" plt.axhline(\"\")\r\n",
" plt.axvline(\"\")\r\n",
" plt.text(x=10, y=-10, s=y_train[i], fontsize=18)\r\n",
" plt.imshow(X_train[i].reshape(28, 28), cmap=plt.cm.Greys)\r\n",
"plt.show()"
]
},
@@ -274,7 +273,7 @@
},
"outputs": [],
"source": [
"with open(\"sklearn-mnist-batch/train.py\", \"r\") as f:\n",
"with open(\"./src/train.py\", \"r\") as f:\n",
" print(f.read())"
]
},
@@ -375,8 +374,8 @@
}
},
"source": [
"Create a [ScriptRunConfig](https://docs.microsoft.com/python/api/azureml-core/azureml.core.scriptrunconfig?preserve-view=true&view=azure-ml-py) object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on. A script run configuration is used to configure the information necessary for submitting a training run as part of an experiment. \n",
"\n",
"Create a [ScriptRunConfig](https://docs.microsoft.com/python/api/azureml-core/azureml.core.scriptrunconfig?preserve-view=true&view=azure-ml-py) object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on. A script run configuration is used to configure the information necessary for submitting a training run as part of an experiment. In this case we will run this on a 'local' compute target, which is the compute instance you are running this notebook on.\r\n",
"\r\n",
"Read more about configuring and submitting training runs [here](https://docs.microsoft.com/azure/machine-learning/how-to-set-up-training-targets). "
]
},
@@ -403,9 +402,8 @@
"\n",
"args = [\"--data-folder\", mnist_file_dataset.as_mount(), \"--regularization\", 0.5]\n",
"\n",
"script_folder = \"sklearn-mnist-batch\"\n",
"src = ScriptRunConfig(\n",
" source_directory=script_folder,\n",
" source_directory=\"src\",\n",
" script=\"train.py\",\n",
" arguments=args,\n",
" compute_target=\"local\",\n",
@@ -673,7 +671,7 @@
"\n",
"In this quickstart, you have seen how to run jobs-based machine learning code in Azure Machine Learning. \n",
"\n",
"It is also possible to use automated machine learning in Azure Machine Learning service to find the best model in an automated fashion. To see how this works, we recommend that you follow the next quickstart in this series, [**Fraud Classification using Automated ML**](ClassificationWithAutomatedML.ipynb). This quickstart is focused on AutoML using the Python SDK."
"It is also possible to use automated machine learning in Azure Machine Learning service to find the best model in an automated fashion. To see how this works, we recommend that you follow the next quickstart in this series, [**Fraud Classification using Automated ML**](../quickstart-azureml-automl/quickstart-azureml-automl.ipynb). This quickstart is focused on AutoML using the Python SDK."
]
}
],
@@ -706,5 +704,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

View File

@@ -1,4 +1,4 @@
name: AzureMLIn10mins
name: quickstart-azureml-python-sdk
dependencies:
- pip:
- azureml-sdk
@@ -9,3 +9,4 @@ dependencies:
- uuid
- requests
- azureml-opendatasets
- azureml-widgets

View File

@@ -1,12 +0,0 @@
# 01-create-workspace.py
from azureml.core import Workspace

# Example locations: 'westeurope' or 'eastus2' or 'westus2' or 'southeastasia'.
ws = Workspace.create(name='<my_workspace_name>',
                      subscription_id='<azure-subscription-id>',
                      resource_group='<myresourcegroup>',
                      create_resource_group=True,
                      location='<NAME_OF_REGION>')

# write out the workspace details to a configuration file: .azureml/config.json
ws.write_config(path='.azureml')

View File

@@ -1,23 +0,0 @@
# 02-create-compute.py
from azureml.core import Workspace
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

ws = Workspace.from_config()

# Choose a name for your CPU cluster
cpu_cluster_name = "cpu-cluster"

# Verify that cluster does not exist already
try:
    cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    cfg = AmlCompute.provisioning_configuration(
        vm_size='STANDARD_D2_V2',
        max_nodes=4,
        idle_seconds_before_scaledown=2400
    )
    cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, cfg)

cpu_cluster.wait_for_completion(show_output=True)

View File

@@ -1,13 +0,0 @@
# 03-run-hello.py
from azureml.core import Workspace, Experiment, ScriptRunConfig

ws = Workspace.from_config()
experiment = Experiment(workspace=ws, name='day1-experiment-hello')
config = ScriptRunConfig(source_directory='./src',
                         script='hello.py',
                         compute_target='cpu-cluster')

run = experiment.submit(config)
aml_url = run.get_portal_url()
print(aml_url)

View File

@@ -1,24 +0,0 @@
# 04-run-pytorch.py
from azureml.core import Workspace
from azureml.core import Experiment
from azureml.core import Environment
from azureml.core import ScriptRunConfig

if __name__ == "__main__":
    ws = Workspace.from_config()
    experiment = Experiment(workspace=ws, name='day1-experiment-train')
    config = ScriptRunConfig(source_directory='./src',
                             script='train.py',
                             compute_target='cpu-cluster')

    # set up pytorch environment
    env = Environment.from_conda_specification(
        name='pytorch-env',
        file_path='./environments/pytorch-env.yml'
    )
    config.run_config.environment = env

    run = experiment.submit(config)
    aml_url = run.get_portal_url()
    print(aml_url)

View File

@@ -1,7 +0,0 @@
# 05-upload-data.py
from azureml.core import Workspace

ws = Workspace.from_config()
datastore = ws.get_default_datastore()
datastore.upload(src_dir='./data',
                 target_path='datasets/cifar10',
                 overwrite=True)

View File

@@ -1,35 +0,0 @@
# 06-run-pytorch-data.py
from azureml.core import Workspace
from azureml.core import Experiment
from azureml.core import Environment
from azureml.core import ScriptRunConfig
from azureml.core import Dataset

if __name__ == "__main__":
    ws = Workspace.from_config()
    datastore = ws.get_default_datastore()
    dataset = Dataset.File.from_files(path=(datastore, 'datasets/cifar10'))

    experiment = Experiment(workspace=ws, name='day1-experiment-data')

    config = ScriptRunConfig(
        source_directory='./src',
        script='train.py',
        compute_target='cpu-cluster',
        arguments=[
            '--data_path', dataset.as_named_input('input').as_mount(),
            '--learning_rate', 0.003,
            '--momentum', 0.92],
    )

    # set up pytorch environment
    env = Environment.from_conda_specification(
        name='pytorch-env',
        file_path='./environments/pytorch-env.yml'
    )
    config.run_config.environment = env

    run = experiment.submit(config)
    aml_url = run.get_portal_url()

    print("Submitted to compute cluster. Click link below")
    print("")
    print(aml_url)

View File

@@ -1,25 +0,0 @@
# Get Started (day 1) with Azure Machine Learning: IDE Users
This folder has been set up for IDE users (for example, VS Code or PyCharm) following the [Get started (day 1) with Azure Machine Learning tutorial series](https://aka.ms/day1aml).
The directory is structured as follows:
```Text
IDE-users
└──environments
| └──pytorch-env.yml
└──src
| └──hello.py
| └──model.py
| └──train.py
└──01-create-workspace.py
└──02-create-compute.py
└──03-run-hello.py
└──04-run-pytorch.py
└──05-upload-data.py
└──06-run-pytorch-data.py
```
Please refer to [the documentation](https://aka.ms/day1aml) for more details on these files.
![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/get-started-day1/IDE/README.png)

View File

@@ -1,9 +0,0 @@
name: pytorch-env
channels:
- defaults
- pytorch
dependencies:
- python=3.6.2
- pytorch
- torchvision

View File

@@ -1,2 +0,0 @@
print("hello world!")

View File

@@ -1,22 +0,0 @@
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

View File

@@ -1,52 +0,0 @@
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

from model import Net

# download CIFAR 10 data
trainset = torchvision.datasets.CIFAR10(
    root="./data",
    train=True,
    download=True,
    transform=torchvision.transforms.ToTensor(),
)
trainloader = torch.utils.data.DataLoader(
    trainset, batch_size=4, shuffle=True, num_workers=2
)

if __name__ == "__main__":
    # define convolutional network
    net = Net()

    # set up pytorch loss / optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # train the network
    for epoch in range(2):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # unpack the data
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:
                loss = running_loss / 2000
                print(f"epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}")
                running_loss = 0.0

    print("Finished Training")

View File

@@ -1,2 +0,0 @@
print("hello world!")

View File

@@ -1,22 +0,0 @@
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

View File

@@ -1,62 +0,0 @@
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

from model import Net
from azureml.core import Run

# ADDITIONAL CODE: get AML run from the current context
run = Run.get_context()

# download CIFAR 10 data
trainset = torchvision.datasets.CIFAR10(
    root='./data',
    train=True,
    download=True,
    transform=torchvision.transforms.ToTensor()
)
trainloader = torch.utils.data.DataLoader(
    trainset,
    batch_size=4,
    shuffle=True,
    num_workers=2
)

if __name__ == "__main__":
    # define convolutional network
    net = Net()

    # set up pytorch loss / optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # train the network
    for epoch in range(2):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # unpack the data
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:
                loss = running_loss / 2000

                # ADDITIONAL CODE: log loss metric to AML
                run.log('loss', loss)

                print(f'epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}')
                running_loss = 0.0

    print('Finished Training')

View File

@@ -1,22 +0,0 @@
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

View File

@@ -1,52 +0,0 @@
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

from model import Net

# download CIFAR 10 data
trainset = torchvision.datasets.CIFAR10(
    root="./data",
    train=True,
    download=True,
    transform=torchvision.transforms.ToTensor(),
)
trainloader = torch.utils.data.DataLoader(
    trainset, batch_size=4, shuffle=True, num_workers=2
)

if __name__ == "__main__":
    # define convolutional network
    net = Net()

    # set up pytorch loss / optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # train the network
    for epoch in range(2):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # unpack the data
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:
                loss = running_loss / 2000
                print(f"epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}")
                running_loss = 0.0

    print("Finished Training")

View File

@@ -1,22 +0,0 @@
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

View File

@@ -1,96 +0,0 @@
import os
import argparse

import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

from model import Net
from azureml.core import Run

run = Run.get_context()

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data_path',
        type=str,
        help='Path to the training data'
    )
    parser.add_argument(
        '--learning_rate',
        type=float,
        default=0.001,
        help='Learning rate for SGD'
    )
    parser.add_argument(
        '--momentum',
        type=float,
        default=0.9,
        help='Momentum for SGD'
    )
    args = parser.parse_args()

    print("===== DATA =====")
    print("DATA PATH: " + args.data_path)
    print("LIST FILES IN DATA PATH...")
    print(os.listdir(args.data_path))
    print("================")

    # prepare DataLoader for CIFAR10 data
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    trainset = torchvision.datasets.CIFAR10(
        root=args.data_path,
        train=True,
        download=False,
        transform=transform,
    )
    trainloader = torch.utils.data.DataLoader(
        trainset,
        batch_size=4,
        shuffle=True,
        num_workers=2
    )

    # define convolutional network
    net = Net()

    # set up pytorch loss / optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.SGD(
        net.parameters(),
        lr=args.learning_rate,
        momentum=args.momentum,
    )

    # train the network
    for epoch in range(2):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # unpack the data
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:
                loss = running_loss / 2000
                run.log('loss', loss)  # log loss metric to AML
                print(f'epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}')
                running_loss = 0.0

    print('Finished Training')

View File

@@ -1,11 +0,0 @@
name: pytorch-aml-env
channels:
- defaults
- pytorch
dependencies:
- python=3.6.2
- pytorch
- torchvision
- pip
- pip:
- azureml-sdk

View File

@@ -1,9 +0,0 @@
name: pytorch-env
channels:
- defaults
- pytorch
dependencies:
- python=3.6.2
- pytorch
- torchvision

View File

@@ -1,166 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/day1-part1-setup.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Tutorial: Get started (day 1) with Azure Machine Learning (Part 1 of 4)\n",
"\n",
"---\n",
"## Introduction <a id='intro'></a>\n",
"\n",
"In this **four-part tutorial series**, you will learn the fundamentals of Azure Machine Learning and complete jobs-based Python machine learning tasks in the Azure cloud, including:\n",
"\n",
"1. Set up a compute cluster\n",
"2. Run code in the cloud using Azure Machine Learning's Python SDK.\n",
"3. Manage the Python environment you use for model training.\n",
"4. Upload data to Azure and consume that data in training.\n",
"\n",
"In this first part of the tutorial series you learn how to create an Azure Machine Learning Compute Cluster that will be used in subsequent parts of the series to submit jobs to. This notebook follows the steps provided on the [Python (day 1) - set up local computer documentation page](https://aka.ms/day1aml).\n",
"\n",
"## Pre-requisites <a id='pre-reqs'></a>\n",
"\n",
"- An Azure Subscription. If you don't have an Azure subscription, create a free account before you begin. Try [Azure Machine Learning](https://aka.ms/AMLFree) today.\n",
"- Familiarity with Python and Machine Learning concepts. For example, environments, training, scoring, and so on.\n",
"- If you are using a compute instance in Azure Machine Learning to run this notebook series, you are all set. Otherwise, please follow the [Configure a development environment for Azure Machine Learning](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment)\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Ensure you have the latest Azure Machine Learning Python SDK\n",
"\n",
"This tutorial series depends on having the Azure Machine Learning SDK version 1.14.0 onwards installed. You can check your version using the code cell below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import VERSION\n",
"\n",
"print ('Version: ' + VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If your version is below 1.14.0, then upgrade the SDK using `pip` (**Note: You may need to restart your kernel for the changes to take effect. Re-run the cell above to ensure you have the right version**).\n",
"\n",
"```bash\n",
"!pip install -U azureml-sdk\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create an Azure Machine Learning compute cluster <a id='createcc'></a>\n",
"\n",
"As this tutorial focuses on jobs-based machine learning tasks, you will be submitting python code to run on an Azure Machine Learning **Compute cluster**, which is well suited for large jobs and production. Therefore, you create an Azure Machine Learning compute cluster that will auto-scale between zero and four nodes:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"create mlc",
"batchai"
]
},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"ws = Workspace.from_config() # this automatically looks for a directory .azureml\n",
"\n",
"# Choose a name for your CPU cluster\n",
"cpu_cluster_name = \"cpu-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
" max_nodes=4, \n",
" idle_seconds_before_scaledown=2400)\n",
" cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"cpu_cluster.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"> <span style=\"color:darkblue;font-weight:bold\"> ! INFORMATION \n",
"> When the cluster has been created it will have 0 nodes provisioned. Therefore, the cluster does not incur costs until you submit a job. This cluster will scale down when it has been idle for 2400 seconds (40 minutes).</span>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next Steps\n",
"\n",
"In the next tutorial, you walk through submitting a script to the Azure Machine Learning compute cluster.\n",
"\n",
"[Tutorial: Run \"Hello World\" Python Script on Azure](day1-part2-hello-world.ipynb)\n"
]
}
],
"metadata": {
"authors": [
{
"name": "samkemp"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
},
"notice": "Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License."
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -1,204 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/get-started-day1/day1-part2-hello-world.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Tutorial: \"Hello World\" (Part 2 of 4)\n",
"\n",
"---\n",
"## Introduction\n",
"In **part 2 of this get started series**, you will submit a trivial \"hello world\" python script to the cloud by:\n",
"\n",
"- Running Python code in the cloud with Azure Machine Learning SDK\n",
"- Switching between debugging locally on a compute instance.\n",
"- Submitting remote runs in the cloud\n",
"- Monitoring and recording runs in the Azure Machine Learning studio\n",
"\n",
"This notebook follows the steps provided on the [Python (day 1) - \"hello world\" documentation page](https://aka.ms/day1aml). This tutorial is part of a **four-part tutorial series** in which you learn the fundamentals of Azure Machine Learning and complete simple jobs-based machine learning tasks in the Azure cloud. It builds off the work you completed in [Tutorial part 1: set up an Azure Machine Learning compute cluster](day1-part1-setup.ipynb).\n",
"\n",
"## Pre-requisites\n",
"\n",
"- Complete [Tutorial part 1: set up an Azure Machine Learning compute cluster](day1-part1-setup.ipynb) if you don't already have an Azure Machine Learning compute cluster.\n",
"- Familiarity with Python and Machine Learning concepts.\n",
"- If you are using a compute instance in Azure Machine Learning to run this notebook series, you are all set. Otherwise, please follow the [Configure a development environment for Azure Machine Learning](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment)\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Your code\n",
"\n",
"In the `code/hello` subdirectory you will find a trivial python script [hello.py](code/hello/hello.py) that has the following code:\n",
"\n",
"```Python\n",
"# code/hello/hello.py\n",
"print(\"hello world!\")\n",
"```\n",
"\n",
"In this tutorial you are going to submit this trivial python script to an Azure Machine Learning Compute Cluster."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Test in your development environment\n",
"\n",
"You can test your code works on a compute instance or locally (for example, a laptop), which has the benefit of interactive debugging of code:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"!python code/hello/hello.py"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Submit your code to Azure Machine Learning\n",
"\n",
"Below you create a __*control script*__ this is where you specify _how_ your code is submitted to Azure Machine Learning. The code you submit to Azure Machine Learning (in this case `hello.py`) does not need anything specific to Azure Machine Learning - it can be any valid Python code. It is only the control script that is Azure Machine Learning specific.\n",
"\n",
"The code below will show a Jupyter widget that tracks the progress of your run, and displays logs.\n",
"\n",
"> <span style=\"color:purple; font-weight:bold\">! NOTE <br>\n",
"> The very first run will take 5-10minutes to complete. This is because in the background a docker image is built in the cloud, the compute cluster is resized from 0 to 1 node, and the docker image is downloaded to the compute. Subsequent runs are much quicker (~15 seconds) as the docker image is cached on the compute - you can test this by resubmitting the code below after the first run has completed.</span>"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"remote run",
"batchai",
"configure run",
"use notebook widget"
]
},
"outputs": [],
"source": [
"from azureml.core import Workspace, Experiment, ScriptRunConfig\n",
"from azureml.widgets import RunDetails\n",
"\n",
"ws = Workspace.from_config()\n",
"experiment = Experiment(workspace=ws, name='day1-experiment-hello')\n",
"\n",
"config = ScriptRunConfig(source_directory='./code/hello', script='hello.py', compute_target='cpu-cluster')\n",
"\n",
"run = experiment.submit(config)\n",
"RunDetails(run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Understanding the control code\n",
"\n",
"| Code |Description | \n",
"|---|---|\n",
"| `ws = Workspace.from_config()` | [Workspace](https://docs.microsoft.com/python/api/azureml-core/azureml.core.workspace.workspace?view=azure-ml-py&preserve-view=true) connects to your Azure Machine Learning workspace, so that you can communicate with your Azure Machine Learning resources. |\n",
"| `experiment = Experiment( ... )` | [Experiment](https://docs.microsoft.com/python/api/azureml-core/azureml.core.experiment.experiment?view=azure-ml-py&preserve-view=true) provides a simple way to organize multiple runs under a single name. <br>Later you can see how experiments make it easy to compare metrics between dozens of runs. |\n",
"| `config = ScriptRunConfig( ... )` | [ScriptRunConfig](https://docs.microsoft.com/python/api/azureml-core/azureml.core.scriptrunconfig?view=azure-ml-py&preserve-view=true) wraps your `hello.py` code and passes it to your workspace.<br> As the name suggests, you can use this class to _configure_ how you want your _script_ to _run_ in Azure Machine Learning. <br>Also specifies what compute target the script will run on. <br>In this code, the target is the compute cluster you created in the [setup tutorial](tutorial-1st-experiment-sdk-setup-local.md). |\n",
"| `run = experiment.submit(config)` | Submits your script. This submission is called a [Run](https://docs.microsoft.com/python/api/azureml-core/azureml.core.run(class)?view=azure-ml-py&preserve-view=true). <br>A run encapsulates a single execution of your code. Use a run to monitor the script progress, capture the output,<br> analyze the results, visualize metrics and more. |\n",
"| `aml_url = run.get_portal_url()` | The `run` object provides a handle on the execution of your code. Monitor its progress from <br> the Azure Machine Learning Studio with the URL that is printed from the python script. |\n",
"|`RunDetails(run).show()` | There is an Azure Machine Learning widget that shows the progress of your job along with streaming the log files.\n",
"\n",
"## View the logs\n",
"\n",
"The widget has a dropdown box titled **Output logs** select `70_driver_log.txt`, which shows the following standard output: \n",
"\n",
"```\n",
" 1: [2020-08-04T22:15:44.407305] Entering context manager injector.\n",
" 2: [context_manager_injector.py] Command line Options: Namespace(inject=['ProjectPythonPath:context_managers.ProjectPythonPath', 'RunHistory:context_managers.RunHistory', 'TrackUserError:context_managers.TrackUserError', 'UserExceptions:context_managers.UserExceptions'], invocation=['hello.py'])\n",
" 3: Starting the daemon thread to refresh tokens in background for process with pid = 31263\n",
" 4: Entering Run History Context Manager.\n",
" 5: Preparing to call script [ hello.py ] with arguments: []\n",
" 6: After variable expansion, calling script [ hello.py ] with arguments: []\n",
" 7:\n",
" 8: Hello world!\n",
" 9: Starting the daemon thread to refresh tokens in background for process with pid = 31263\n",
"10:\n",
"11:\n",
"12: The experiment completed successfully. Finalizing run...\n",
"13: Logging experiment finalizing status in history service.\n",
"14: [2020-08-04T22:15:46.541334] TimeoutHandler __init__\n",
"15: [2020-08-04T22:15:46.541396] TimeoutHandler __enter__\n",
"16: Cleaning up all outstanding Run operations, waiting 300.0 seconds\n",
"17: 1 items cleaning up...\n",
"18: Cleanup took 0.1812913417816162 seconds\n",
"19: [2020-08-04T22:15:47.040203] TimeoutHandler __exit__\n",
"```\n",
"\n",
"On line 8 above, you see the \"Hello world!\" output. The 70_driver_log.txt file contains the standard output from run and can be useful when debugging remote runs in the cloud. You can also view the run by clicking on the **Click here to see the run in Azure Machine Learning studio** link in the wdiget.\n",
"\n",
"## Next steps\n",
"\n",
"In this tutorial, you took a simple \"hello world\" script and ran it on Azure. You saw how to connect to your Azure Machine Learning workspace, create an Experiment, and submit your `hello.py` code to the cloud.\n",
"\n",
"In the [next tutorial](day1-part3-train-model.ipynb), you build on these learnings by running something more interesting than `print(\"Hello world!\")`.\n"
]
}
],
"metadata": {
"authors": [
{
"name": "samkemp"
}
],
"celltoolbar": "Edit Metadata",
"kernel_info": {
"name": "python3-azureml"
},
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
},
"notice": "Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.",
"nteract": {
"version": "nteract-front-end@1.0.0"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -1,5 +0,0 @@
name: day1-part2-hello-world
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -1,289 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/get-started-day1/day1-part3-train-model.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Tutorial: Train your first ML model (Part 3 of 4)\n",
"\n",
"---\n",
"## Introduction\n",
"In the [previous tutorial](day1-part2-hello-world.ipynb), you ran a trivial \"Hello world!\" script in the cloud using Azure Machine Learning's Python SDK. This time you take it a step further by submitting a script that will train a machine learning model. This example will help you understand how Azure Machine Learning eases consistent behavior between debugging on a compute instance or laptop development environment, and remote runs.\n",
"\n",
"Learning these concepts means that by the end of this session, you can:\n",
"\n",
"* Use Conda to define an Azure Machine Learning environment.\n",
"* Train a model in the cloud.\n",
"* Log metrics to Azure Machine Learning.\n",
"\n",
"This notebook follows the steps provided on the [Python (day 1) - train a model documentation page](https://aka.ms/day1aml).\n",
"\n",
"## Prerequisites\n",
"\n",
"- You have completed the following:\n",
" - [Setup on your compute cluster](day1-part1-setup.ipynb)\n",
" - [Tutorial: Hello World example](day1-part2-hello-world.md)\n",
"- Familiarity with Python and Machine Learning concepts\n",
"- If you are using a compute instance in Azure Machine Learning to run this notebook series, you are all set. Otherwise, please follow the [Configure a development environment for Azure Machine Learning](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment)\n",
"---\n",
"\n",
"## Your machine learning code\n",
"\n",
"This tutorial shows you how to train a PyTorch model on the CIFAR 10 dataset using an Azure Machine Learning Cluster. In this case you will be using a CPU cluster, but this could equally be a GPU cluster. Whilst this tutorial uses PyTorch, the steps we show you apply to *any* machine learning code. \n",
"\n",
"In the `code/pytorch-cifar10-train` subdirectory you will see 2 files:\n",
"\n",
"1. [model.py](code/pytorch-cifar10-train/model.py) - this defines the neural network architecture\n",
"1. [train.py](code/pytorch-cifar10-train/train.py) - This is the training script. This script downloads the CIFAR10 dataset using PyTorch `torchvision.dataset` APIs, sets up the network defined in\n",
"`model.py`, and trains it for two epochs using standard SGD and cross-entropy loss.\n",
"\n",
"Note the code is based on [this introductory example from PyTorch](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html). "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Define the Python environment for your machine learning code\n",
"\n",
"For demonstration purposes, we're going to use a Conda environment but the steps for a pip virtual environment are almost identical. This environment has all the dependencies that your model and training script require. \n",
"\n",
"In the `configuration` directory there is a *conda dependencies* file called [pytorch-env.yml](configuration/pytorch-env.yml) that specifies the dependencies to run the python code. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Test in your development environment\n",
"\n",
"Test your script runs on either your compute instance or laptop using this environment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!python code/pytorch-cifar10-train/train.py"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**You should notice that the script has downloaded the data into a directory called `data`.**\n",
"\n",
"## Submit your machine learning code to Azure Machine Learning\n",
"\n",
"The difference to the control script below and the one used to submit \"hello world\" is that you adjust the environment to be set from the conda dependencies file you created earlier.\n",
"\n",
"> <span style=\"color:purple; font-weight:bold\">! NOTE <br>\n",
"> The first time you run this script, Azure Machine Learning will build a new docker image from your PyTorch environment. The whole run could take 5-10 minutes to complete. You can see the docker build logs in the widget by selecting the `20_image_build_log.txt` in the log files dropdown. This image will be reused in future runs making them run much quicker.</span>\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"remote run",
"batchai",
"configure run",
"use notebook widget"
]
},
"outputs": [],
"source": [
"from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig\n",
"from azureml.widgets import RunDetails\n",
"\n",
"ws = Workspace.from_config()\n",
"experiment = Experiment(workspace=ws, name='day1-experiment-train')\n",
"config = ScriptRunConfig(source_directory='code/pytorch-cifar10-train/', script='train.py', compute_target='cpu-cluster')\n",
"\n",
"env = Environment.from_conda_specification(name='pytorch-env', file_path='configuration/pytorch-env.yml')\n",
"config.run_config.environment = env\n",
"\n",
"run = experiment.submit(config)\n",
"\n",
"RunDetails(run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Understand the control code\n",
"\n",
"Compared to the control script that submitted the \"hello world\" example, this control script introduces the following:\n",
"\n",
"| Code | Description\n",
"| --- | --- |\n",
"| `env = Environment.from_conda_specification( ...)` | Azure Machine Learning provides the concept of an `Environment` to represent a reproducible, <br>versioned Python environment for running experiments. Here you have created it from a yaml conda dependencies file.|\n",
"| `config.run_config.environment = env` | adds the environment to the ScriptRunConfig. |\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**There are many ways to create AML environments, including [from a pip requirements.txt](https://docs.microsoft.com/python/api/azureml-core/azureml.core.environment.environment?view=azure-ml-py&preserve-view=true#from-pip-requirements-name--file-path-), or even [from an existing local Conda environment](https://docs.microsoft.com/python/api/azureml-core/azureml.core.environment.environment?view=azure-ml-py&preserve-view=true#from-existing-conda-environment-name--conda-environment-name-).**\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once your image is built, select `70_driver_log.txt` to see the output of your training script, which should look like:\n",
"\n",
"```txt\n",
"Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ./data/cifar-10-python.tar.gz\n",
"...\n",
"Files already downloaded and verified\n",
"epoch=1, batch= 2000: loss 2.19\n",
"...\n",
"epoch=2, batch=12000: loss 1.27\n",
"Finished Training\n",
"```\n",
"\n",
"Environments can be registered to a workspace with `env.register(ws)`, allowing them to be easily shared, reused, and versioned. Environments make it easy to reproduce previous results and to collaborate with your team.\n",
"\n",
"Azure Machine Learning also maintains a collection of curated environments. These environments cover common ML scenarios and are backed by cached Docker images. Cached Docker images make the first remote run faster.\n",
"\n",
"In short, using registered environments can save you time! More details can be found on the [environments documentation](./how-to-use-environments.md)\n",
"\n",
"## Log training metrics\n",
"\n",
"Now that you have a model training in Azure Machine Learning, start tracking some performance metrics.\n",
"The current training script prints metrics to the terminal. Azure Machine Learning provides a\n",
"mechanism for logging metrics with more functionality. By adding a few lines of code, you gain the ability to visualize metrics in the studio and to compare metrics between multiple runs.\n",
"\n",
"### Machine learning code updates\n",
"\n",
"In the `code/pytorch-cifar10-train-with-logging` directory you will notice the [train.py](code/pytorch-cifar10-train-with-logging/train.py) script has been modified with two additional lines that will log the loss to the Azure Machine Learning Studio:\n",
"\n",
"```python\n",
"# in train.py\n",
"run = Run.get_context()\n",
"...\n",
"run.log('loss', loss)\n",
"```\n",
"\n",
"Metrics in Azure Machine Learning are:\n",
"\n",
"- Organized by experiment and run so it's easy to keep track of and\n",
"compare metrics.\n",
"- Equipped with a UI so we can visualize training performance in the studio or in the notebook widget.\n",
"- **Designed to scale** You can submit concurrent experiments and the Azure Machine Learning cluster will scale out (up to the maximum node count of the cluster) to run the experiments in parallel."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Update the Environment for your machine learning code\n",
"\n",
"The `train.py` script just took a new dependency on `azureml.core`. Therefore, the conda dependecies file [pytorch-aml-env](configuration/pytorch-aml-env.yml) reflects this change."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit your machine learning code to Azure Machine Learning\n",
"Submit your code once more. This time the widget includes the metrics where you can now see live updates on the model training loss!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"remote run",
"batchai",
"configure run",
"use notebook widget",
"get metrics"
]
},
"outputs": [],
"source": [
"from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig\n",
"from azureml.widgets import RunDetails\n",
"\n",
"ws = Workspace.from_config()\n",
"experiment = Experiment(workspace=ws, name='day1-experiment-train')\n",
"config = ScriptRunConfig(source_directory='code/pytorch-cifar10-train-with-logging', script='train.py', compute_target='cpu-cluster')\n",
"\n",
"env = Environment.from_conda_specification(name='pytorch-aml-env', file_path='configuration/pytorch-aml-env.yml')\n",
"config.run_config.environment = env\n",
"\n",
"run = experiment.submit(config)\n",
"RunDetails(run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"In this session, you upgraded from a basic \"Hello world!\" script to a more realistic\n",
"training script that required a specific Python environment to run. You saw how\n",
"to take a local Conda environment to the cloud with Azure Machine Learning Environments. Finally, you\n",
"saw how in a few lines of code you can log metrics to Azure Machine Learning.\n",
"\n",
"In the next session, you'll see how to work with data in Azure Machine Learning by uploading the CIFAR10\n",
"dataset to Azure.\n",
"\n",
"[Tutorial: Bring your own data](day1-part4-data.ipynb)\n"
]
}
],
"metadata": {
"authors": [
{
"name": "samkemp"
}
],
"celltoolbar": "Edit Metadata",
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
},
"notice": "Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License."
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -1,7 +0,0 @@
name: day1-part3-train-model
dependencies:
- pip:
- azureml-sdk
- azureml-widgets
- pytorch
- torchvision

View File

@@ -1,297 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/get-started-day1/day1-part4-data.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Tutorial: Bring your own data (Part 4 of 4)\n",
"\n",
"---\n",
"## Introduction\n",
"\n",
"In the previous [Tutorial: Train a model in the cloud](day1-part3-train-model.ipynb) article, the CIFAR10 data was downloaded using the inbuilt `torchvision.datasets.CIFAR10` method in the PyTorch API. However, in many cases you are going to want to use your own data in a remote training run. This article focuses on the workflow you can leverage such that you can work with your own data in Azure Machine Learning. \n",
"\n",
"By the end of this tutorial you would have a better understanding of:\n",
"\n",
"- How to upload your data to Azure\n",
"- Best practices for working with cloud data in Azure Machine Learning\n",
"- Working with command-line arguments\n",
"\n",
"This notebook follows the steps provided on the [Python (day 1) - bring your own data documentation page](https://aka.ms/day1aml).\n",
"\n",
"## Prerequisites\n",
"\n",
"- You have completed:\n",
" - Setup on your [Azure Machine Learning Compute Cluster](day1-part1-setup.ipynb)\n",
" - [Tutorial: Hello World](day1-part2-hello-world.ipynb)\n",
" - [Tutorial: Train a model in the cloud](day1-part3-train-model.ipynb)\n",
"- Familiarity with Python and Machine Learning concepts\n",
"- If you are using a compute instance in Azure Machine Learning to run this notebook series, you are all set. Otherwise, please follow the [Configure a development environment for Azure Machine Learning](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment)\n",
"\n",
"---\n",
"\n",
"## Your machine learning code\n",
"\n",
"By now you have your training script running in Azure Machine Learning, and can monitor the model performance. Let's _parametrize_ the training script by introducing\n",
"arguments. Using arguments will allow you to easily compare different hyperparmeters.\n",
"\n",
"Presently our training script is set to download the CIFAR10 dataset on each run. The python code in [code/pytorch-cifar10-your-data/train.py](code/pytorch-cifar10-your-data/train.py) now uses **`argparse` to parametize the script.**"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Understanding your machine learning code changes\n",
"\n",
"The code used in `train.py` has leveraged the `argparse` library to set up the `data_path`, `learning_rate`, and `momentum`.\n",
"\n",
"```python\n",
"# .... other code\n",
"parser = argparse.ArgumentParser()\n",
"parser.add_argument('--data_path', type=str, help='Path to the training data')\n",
"parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate for SGD')\n",
"parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for SGD')\n",
"args = parser.parse_args()\n",
"# ... other code\n",
"```\n",
"\n",
"Also the `train.py` script was adapted to update the optimizer to use the user-defined parameters:\n",
"\n",
"```python\n",
"optimizer = optim.SGD(\n",
" net.parameters(),\n",
" lr=args.learning_rate, # get learning rate from command-line argument\n",
" momentum=args.momentum, # get momentum from command-line argument\n",
")\n",
"```\n",
"\n",
"## Test your machine learning code locally\n",
"\n",
"To run the modified training script locally, run the python command below.\n",
"\n",
"You avoid having to download the CIFAR10 dataset by passing in a local path to the\n",
"data. Also you can experiment with different values for _learning rate_ and\n",
"_momentum_ hyperparameters without having to hard-code them in the training script.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!python code/pytorch-cifar10-your-data/train.py --data_path ./data --learning_rate 0.003 --momentum 0.92"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Upload your data to Azure\n",
"\n",
"In order to run this script in Azure Machine Learning, you need to make your training data available in Azure. Your Azure Machine Learning workspace comes equipped with a _default_ **Datastore** - an Azure Blob storage account - that you can use to store your training data.\n",
"\n",
"> <span style=\"color:purple; font-weight:bold\">! NOTE <br>\n",
"> Azure Machine Learning allows you to connect other cloud-based datastores that store your data. For more details, see [datastores documentation](./concept-data.md).</span>\n"
]
},
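{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example, a minimal sketch of attaching an additional Azure Blob container as a datastore (the datastore, container, and account names below are hypothetical placeholders):\n",
"\n",
"```python\n",
"from azureml.core import Datastore\n",
"\n",
"blob_ds = Datastore.register_azure_blob_container(\n",
"    workspace=ws,\n",
"    datastore_name='my_blob_ds',        # hypothetical datastore name\n",
"    container_name='training-data',     # hypothetical container\n",
"    account_name='mystorageaccount',    # hypothetical storage account\n",
"    account_key='<account-key>')\n",
"```"
]
},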
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"ws = Workspace.from_config()\n",
"datastore = ws.get_default_datastore()\n",
"datastore.upload(src_dir='./data', target_path='datasets/cifar10', overwrite=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `target_path` specifies the path on the datastore where the CIFAR10 data will be uploaded.\n",
"\n",
"## Submit your machine learning code to Azure Machine Learning\n",
"\n",
"As you have done previously, create a new Python control script:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"remote run",
"batchai",
"configure run",
"use notebook widget",
"get metrics",
"use datastore"
]
},
"outputs": [],
"source": [
"from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig, Dataset\n",
"from azureml.widgets import RunDetails\n",
"\n",
"ws = Workspace.from_config()\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"dataset = Dataset.File.from_files(path=(datastore, 'datasets/cifar10'))\n",
"\n",
"experiment = Experiment(workspace=ws, name='day1-experiment-data')\n",
"\n",
"config = ScriptRunConfig(source_directory='./code/pytorch-cifar10-your-data',\n",
" script='train.py',\n",
" compute_target='cpu-cluster',\n",
" arguments=[\n",
" '--data_path', dataset.as_named_input('input').as_mount(),\n",
" '--learning_rate', 0.003,\n",
" '--momentum', 0.92])\n",
"\n",
"# set up pytorch environment\n",
"env = Environment.from_conda_specification(name='pytorch-aml-env',file_path='configuration/pytorch-aml-env.yml')\n",
"config.run_config.environment = env\n",
"\n",
"run = experiment.submit(config)\n",
"RunDetails(run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Understand the control code\n",
"\n",
"The above control code has the following additional code compared to the control code written in [previous tutorial](03-train-model.ipynb)\n",
"\n",
"**`dataset = Dataset.File.from_files(path=(datastore, 'datasets/cifar10'))`**: A Dataset is used to reference the data you uploaded to the Azure Blob Store. Datasets are an abstraction layer on top of your data that are designed to improve reliability and trustworthiness.\n",
"\n",
"\n",
"**`config = ScriptRunConfig(...)`**: We modified the `ScriptRunConfig` to include a list of arguments that will be passed into `train.py`. We also specified `dataset.as_named_input('input').as_mount()`, which means the directory specified will be _mounted_ to the compute target.\n",
"\n",
"## Inspect the 70_driver_log log file\n",
"\n",
"In the navigate to the 70_driver_log.txt file - you should see the following output:\n",
"\n",
"```\n",
"Processing 'input'.\n",
"Processing dataset FileDataset\n",
"{\n",
" \"source\": [\n",
" \"('workspaceblobstore', 'datasets/cifar10')\"\n",
" ],\n",
" \"definition\": [\n",
" \"GetDatastoreFiles\"\n",
" ],\n",
" \"registration\": {\n",
" \"id\": \"XXXXX\",\n",
" \"name\": null,\n",
" \"version\": null,\n",
" \"workspace\": \"Workspace.create(name='XXXX', subscription_id='XXXX', resource_group='X')\"\n",
" }\n",
"}\n",
"Mounting input to /tmp/tmp9kituvp3.\n",
"Mounted input to /tmp/tmp9kituvp3 as folder.\n",
"Exit __enter__ of DatasetContextManager\n",
"Entering Run History Context Manager.\n",
"Current directory: /mnt/batch/tasks/shared/LS_root/jobs/dsvm-aml/azureml/tutorial-session-3_1600171983_763c5381/mounts/workspaceblobstore/azureml/tutorial-session-3_1600171983_763c5381\n",
"Preparing to call script [ train.py ] with arguments: ['--data_path', '$input', '--learning_rate', '0.003', '--momentum', '0.92']\n",
"After variable expansion, calling script [ train.py ] with arguments: ['--data_path', '/tmp/tmp9kituvp3', '--learning_rate', '0.003', '--momentum', '0.92']\n",
"\n",
"Script type = None\n",
"===== DATA =====\n",
"DATA PATH: /tmp/tmp9kituvp3\n",
"LIST FILES IN DATA PATH...\n",
"['cifar-10-batches-py', 'cifar-10-python.tar.gz']\n",
"```\n",
"\n",
"Notice:\n",
"\n",
"1. Azure Machine Learning has mounted the blob store to the compute cluster automatically for you.\n",
"2. The ``dataset.as_named_input('input').as_mount()`` used in the control script resolves to the mount point\n",
"3. In the machine learning code we include a line to list the directorys under the data directory - you can see the list above.\n",
"\n",
"## Clean up resources\n",
"\n",
"The compute cluster will scale down to zero after 40minutes of idle time. When the compute is idle you will not be charged. If you want to delete the cluster use:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"ct = ws.compute_targets['cpu-cluster']\n",
"# ct.delete()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you're not going to use what you've created here, delete the resources you just created with this quickstart so you don't incur any charges for storage. In the Azure portal, select and delete your resource group.\n",
"\n",
"## Next Steps\n",
"\n",
"To learn more about the capabilities of Azure Machine Learning please refer to the following documentation:\n",
"\n",
"* [Azure Machine Learning Pipelines](https://docs.microsoft.com/en-us/azure/machine-learning/concept-ml-pipelines#building-pipelines-with-the-python-sdk)\n",
"* [Deploy models for real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-deploy-models-with-aml)\n",
"* [Hyper parameter tuning with Azure Machine Learning](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters)\n",
"* [Prep your code for production](https://docs.microsoft.com/azure/machine-learning/tutorial-convert-ml-experiment-to-production)"
]
}
],
"metadata": {
"authors": [
{
"name": "samkemp"
}
],
"celltoolbar": "Edit Metadata",
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
},
"notice": "Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License."
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -1,7 +0,0 @@
name: day1-part4-data
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
  - pytorch
  - torchvision

View File

@@ -128,6 +128,9 @@
"metadata": {},
"source": [
"### Create or Attach existing compute resource\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"By using Azure Machine Learning Compute, a managed service, data scientists can train machine learning models on clusters of Azure virtual machines. Examples include VMs with GPU support. In this tutorial, you create Azure Machine Learning Compute as your training environment. You will submit Python code to run on this VM later in the tutorial. \n",
"The code below creates the compute clusters for you if they don't already exist in your workspace.\n",
"\n",

View File

@@ -7,3 +7,4 @@ dependencies:
- sklearn
- pandas
- azureml-opendatasets
- azureml-widgets

View File

@@ -225,7 +225,9 @@
"source": [
"## Create and attach remote compute target\n",
"\n",
"Azure Machine Learning service pipelines cannot be run locally, and only run on cloud resources. Remote compute targets are reusable virtual compute environments where you run experiments and work-flows. Run the following code to create a GPU-enabled [`AmlCompute`](https://docs.microsoft.com/python/api/azureml-core/azureml.core.compute.amlcompute.amlcompute?view=azure-ml-py) target, and attach it to your workspace. See the [conceptual article](https://docs.microsoft.com/azure/machine-learning/service/concept-compute-target) for more information on compute targets."
"Azure Machine Learning service pipelines cannot be run locally, and only run on cloud resources. Remote compute targets are reusable virtual compute environments where you run experiments and work-flows. Run the following code to create a GPU-enabled [`AmlCompute`](https://docs.microsoft.com/python/api/azureml-core/azureml.core.compute.amlcompute.amlcompute?view=azure-ml-py) target, and attach it to your workspace. See the [conceptual article](https://docs.microsoft.com/azure/machine-learning/service/concept-compute-target) for more information on compute targets.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{

View File

@@ -1,4 +0,0 @@
name: ClassificationWithAutomatedML
dependencies:
- pip:
  - azureml-sdk

View File

@@ -1,482 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/quickstart/azureml-quickstart.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Tutorial: Azure Machine Learning Quickstart\n",
"\n",
"In this tutorial, you learn how to quickly get started with Azure Machine Learning. Using a *compute instance* - a fully managed cloud-based VM that is pre-configured with the latest data science tools - you will train an image classification model using the CIFAR10 dataset.\n",
"\n",
"In this tutorial you will learn how to:\n",
"\n",
"* Create a compute instance and attach to a notebook\n",
"* Train an image classification model and log metrics\n",
"* Deploy the model\n",
"\n",
"## Prerequisites\n",
"\n",
"1. An Azure Machine Learning workspace\n",
"1. Familiar with the Python language and machine learning workflows.\n",
"\n",
"\n",
"## Create compute & attach to notebook\n",
"\n",
"To run this notebook you will need to create an Azure Machine Learning _compute instance_. The benefits of a compute instance over a local machine (e.g. laptop) or cloud VM are as follows:\n",
"\n",
"* It is a pre-configured with all the latest data science libaries (e.g. panads, scikit, TensorFlow, PyTorch) and tools (Jupyter, RStudio). In this tutorial we make extensive use of PyTorch, AzureML SDK, matplotlib and we do not need to install these components on a compute instance.\n",
"* Notebooks are seperate from the compute instance - this means that you can develop your notebook on a small VM size, and then seamlessly scale up (and/or use a GPU-enabled) the machine when needed to train a model.\n",
"* You can easily turn on/off the instance to control costs. \n",
"\n",
"To create compute, click on the + button at the top of the notebook viewer in Azure Machine Learning Studio:\n",
"\n",
"<img src=\"https://dsvmamlstorage127a5f726f.blob.core.windows.net/images/ci-create.PNG\" width=\"500\"/>\n",
"\n",
"This will pop up the __New compute instance__ blade, provide a valid __Compute name__ (valid characters are upper and lower case letters, digits, and the - character). Then click on __Create__. \n",
"\n",
"It will take approximately 3 minutes for the compute to be ready. When the compute is ready you will see a green light next to the compute name at the top of the notebook viewer:\n",
"\n",
"<img src=\"https://dsvmamlstorage127a5f726f.blob.core.windows.net/images/ci-create2.PNG\" width=\"500\"/>\n",
"\n",
"You will also notice that the notebook is attached to the __Python 3.6 - AzureML__ jupyter Kernel. Other kernels can be selected such as R. In addition, if you did have other instances you can switch to them by simply using the dropdown menu next to the Compute label.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import Data\n",
"\n",
"For this tutorial, you will use the CIFAR10 dataset. It has the classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck. The images in CIFAR-10 three-channel color images of 32x32 pixels in size.\n",
"\n",
"The code cell below uses the PyTorch API to download the data to your compute instance, which should be quick (around 15 seconds). The data is divided into training and test sets.\n",
"\n",
"* **NOTE: The data is downloaded to the compute instance (in the `/tmp` directory) and not a durable cloud-based store like Azure Blob Storage or Azure Data Lake. This means if you delete the compute instance the data will be lost. The [getting started with Azure Machine Learning tutorial series](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup-local) shows how to create an Azure Machine Learning *dataset*, which aids durability, versioning, and collaboration.**"
]
},
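{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, a minimal sketch of making the data durable by uploading it to the workspace's default datastore and registering it as a dataset (this mirrors what the day-1 tutorial series does; the target path name here is an assumption):\n",
"\n",
"```python\n",
"from azureml.core import Workspace, Dataset\n",
"\n",
"ws = Workspace.from_config()\n",
"datastore = ws.get_default_datastore()\n",
"datastore.upload(src_dir='/tmp/data', target_path='datasets/cifar10', overwrite=True)\n",
"dataset = Dataset.File.from_files(path=(datastore, 'datasets/cifar10'))\n",
"dataset.register(workspace=ws, name='cifar10', create_new_version=True)\n",
"```"
]
},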
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1600881820920
}
},
"outputs": [],
"source": [
"import torch\n",
"import torch.optim as optim\n",
"import torchvision\n",
"import torchvision.transforms as transforms\n",
"\n",
"transform = transforms.Compose(\n",
" [transforms.ToTensor(),\n",
" transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n",
"\n",
"trainset = torchvision.datasets.CIFAR10(root='/tmp/data', train=True,\n",
" download=True, transform=transform)\n",
"trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,\n",
" shuffle=True, num_workers=2)\n",
"\n",
"testset = torchvision.datasets.CIFAR10(root='/tmp/data', train=False,\n",
" download=True, transform=transform)\n",
"testloader = torch.utils.data.DataLoader(testset, batch_size=4,\n",
" shuffle=False, num_workers=2)\n",
"\n",
"classes = ('plane', 'car', 'bird', 'cat',\n",
" 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Take a look at the data\n",
"In the following cell, you have some python code that displays the first batch of 4 CIFAR10 images:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1600882160868
}
},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"def imshow(img):\n",
" img = img / 2 + 0.5 # unnormalize\n",
" npimg = img.numpy()\n",
" plt.imshow(np.transpose(npimg, (1, 2, 0)))\n",
" plt.show()\n",
"\n",
"\n",
"# get some random training images\n",
"dataiter = iter(trainloader)\n",
"images, labels = dataiter.next()\n",
"\n",
"# show images\n",
"imshow(torchvision.utils.make_grid(images))\n",
"# print labels\n",
"print(' '.join('%5s' % classes[labels[j]] for j in range(4)))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train model and log metrics\n",
"\n",
"In the directory `model` you will see a file called [model.py](./model/model.py) that defines the neural network architecture. The model is trained using the code below.\n",
"\n",
"* **Note: The model training take around 4 minutes to complete. The benefit of a compute instance is that the notebooks are separate from the compute - therefore you can easily switch to a different size/type of instance. For example, you could switch to run this training on a GPU-based compute instance if you had one provisioned. In the code below you can see that we have included `torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")`, which detects whether you are using a CPU or GPU machine.**"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1600882387754
},
"tags": [
"local run"
]
},
"outputs": [],
"source": [
"from model.model import Net\n",
"from azureml.core import Experiment\n",
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"\n",
"device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
"device\n",
"\n",
"exp = Experiment(workspace=ws, name=\"cifar10-experiment\")\n",
"run = exp.start_logging(snapshot_directory=None)\n",
"\n",
"# define convolutional network\n",
"net = Net()\n",
"net.to(device)\n",
"\n",
"# set up pytorch loss / optimizer\n",
"criterion = torch.nn.CrossEntropyLoss()\n",
"optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n",
"\n",
"run.log(\"learning rate\", 0.001)\n",
"run.log(\"momentum\", 0.9)\n",
"\n",
"# train the network\n",
"for epoch in range(1):\n",
" running_loss = 0.0\n",
" for i, data in enumerate(trainloader, 0):\n",
" # unpack the data\n",
" inputs, labels = data[0].to(device), data[1].to(device)\n",
"\n",
" # zero the parameter gradients\n",
" optimizer.zero_grad()\n",
"\n",
" # forward + backward + optimize\n",
" outputs = net(inputs)\n",
" loss = criterion(outputs, labels)\n",
" loss.backward()\n",
" optimizer.step()\n",
"\n",
" # print statistics\n",
" running_loss += loss.item()\n",
" if i % 2000 == 1999:\n",
" loss = running_loss / 2000\n",
" run.log(\"loss\", loss)\n",
" print(f'epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}')\n",
" running_loss = 0.0\n",
"\n",
"print('Finished Training')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once you have executed the cell below you can view the metrics updating in real time in the Azure Machine Learning studio:\n",
"\n",
"1. Select **Experiments** (left-hand menu)\n",
"1. Select **cifar10-experiment**\n",
"1. Select **Run 1**\n",
"1. Select the **Metrics** Tab\n",
"\n",
"The metrics tab will display the following graph:\n",
"\n",
"<img src=\"https://dsvmamlstorage127a5f726f.blob.core.windows.net/images/metrics-capture.PNG\" alt=\"dataset details\" width=\"500\"/>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Understand the code\n",
"\n",
"The code is based on the [Pytorch 60minute Blitz](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py) where we have also added a few additional lines of code to track the loss metric as the neural network trains.\n",
"\n",
"| Code | Description | \n",
"| ------------- | ---------- |\n",
"| `experiment = Experiment( ... )` | [Experiment](https://docs.microsoft.com/python/api/azureml-core/azureml.core.experiment.experiment?view=azure-ml-py&preserve-view=true) provides a simple way to organize multiple runs under a single name. Later you can see how experiments make it easy to compare metrics between dozens of runs. |\n",
"| `run.log()` | This will log the metrics to Azure Machine Learning. |"
]
},
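{
"cell_type": "markdown",
"metadata": {},
"source": [
"A couple of other logging calls from the same `Run` API that you could add - a minimal sketch (the metric names are arbitrary):\n",
"\n",
"```python\n",
"run.log_list(\"losses\", [2.1, 1.8, 1.5])  # log a list of values under one metric name\n",
"run.log_image(\"sample-batch\", plot=plt)   # attach a matplotlib figure to the run\n",
"```"
]
},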
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Version control models with the Model Registry\n",
"\n",
"You can use model registration to store and version your models in your workspace. Registered models are identified by name and version. Each time you register a model with the same name as an existing one, the registry increments the version. Azure Machine Learning supports any model that can be loaded through Python 3.\n",
"\n",
"The code below does:\n",
"\n",
"1. Saves the model on the compute instance\n",
"1. Uploads the model file to the run (if you look in the experiment on Azure Machine Learning studio you should see on the **Outputs + logs** tab the model has been saved in the run)\n",
"1. Registers the uploaded model file\n",
"1. Transitions the run to a completed state"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1600888071066
},
"tags": [
"register model from file"
]
},
"outputs": [],
"source": [
"from azureml.core import Model\n",
"\n",
"PATH = 'cifar_net.pth'\n",
"torch.save(net.state_dict(), PATH)\n",
"\n",
"run.upload_file(name=PATH, path_or_stream=PATH)\n",
"model = run.register_model(model_name='cifar10-model', \n",
" model_path=PATH,\n",
" model_framework=Model.Framework.PYTORCH,\n",
" description='cifar10 model')\n",
" \n",
"run.complete()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### View model in the model registry\n",
"\n",
"You can see the stored model by navigating to **Models** in the left-hand menu bar of Azure Machine Learning Studio. Click on the **cifar10-model** and you can see the details of the model like the experiement run id that created the model."
]
},
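{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can also retrieve a registered model from code - a minimal sketch, assuming at least one version of **cifar10-model** has been registered:\n",
"\n",
"```python\n",
"from azureml.core import Model\n",
"\n",
"model = Model(ws, name='cifar10-model')  # latest version by default\n",
"print(model.name, model.version, model.id)\n",
"```"
]
},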
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploy the model\n",
"\n",
"The next cell deploys the model to an Azure Container Instance so that you can score data in real-time (Azure Machine Learning also provides mechanisms to do batch scoring). A real-time endpoint allows application developers to integrate machine learning into their apps.\n",
"\n",
"* **Note: The deployment takes around 3 minutes to complete.**"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"deploy service",
"aci"
]
},
"outputs": [],
"source": [
"from azureml.core import Environment, Model\n",
"from azureml.core.model import InferenceConfig\n",
"from azureml.core.webservice import AciWebservice\n",
"\n",
"environment = Environment.get(ws, \"AzureML-PyTorch-1.6-CPU\")\n",
"model = Model(ws, \"cifar10-model\")\n",
"\n",
"service_name = 'cifar-service'\n",
"inference_config = InferenceConfig(entry_script='score.py', environment=environment)\n",
"aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)\n",
"\n",
"service = Model.deploy(workspace=ws,\n",
" name=service_name,\n",
" models=[model],\n",
" inference_config=inference_config,\n",
" deployment_config=aci_config,\n",
" overwrite=True)\n",
"service.wait_for_deployment(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Understand the code\n",
"\n",
"| Code | Description | \n",
"| ------------- | ---------- |\n",
"| `environment = Environment.get()` | [Environment](https://docs.microsoft.com/python/api/overview/azure/ml/?view=azure-ml-py#environment) specify the Python packages, environment variables, and software settings around your training and scoring scripts. In this case, you are using a *curated environment* that has all the packages to run PyTorch. |\n",
"| `inference_config = InferenceConfig()` | This specifies the inference (scoring) configuration for the deployment such as the script to use when scoring (see below) and on what environment. |\n",
"| `service = Model.deploy()` | Deploy the model. |\n",
"\n",
"The [*scoring script*](score.py) file is has two functions:\n",
"\n",
"1. an `init` function that executes once when the service starts - in this function you normally get the model from the registry and set global variables\n",
"1. a `run(data)` function that executes each time a call is made to the service. In this function, you normally deserialize the json, run a prediction and output the predicted result.\n",
"\n",
"\n",
"## Test the model service\n",
"\n",
"In the next cell, you get some unseen data from the test loader:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dataiter = iter(testloader)\n",
"images, labels = dataiter.next()\n",
"\n",
"# print images\n",
"imshow(torchvision.utils.make_grid(images))\n",
"print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, the next cell runs scores the above images using the deployed model service."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"input_payload = json.dumps({\n",
" 'data': images.tolist()\n",
"})\n",
"\n",
"output = service.run(input_payload)\n",
"print(output)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Clean up resources\n",
"\n",
"To clean up the resources after this quickstart, firstly delete the Model service using:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"service.delete()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Next stop the compute instance by following these steps:\n",
"\n",
"1. Go to **Compute** in the left-hand menu of the Azure Machine Learning studio\n",
"1. Select your compute instance\n",
"1. Select **Stop**\n",
"\n",
"\n",
"**Important: The resources you created can be used as prerequisites to other Azure Machine Learning tutorials and how-to articles.** If you don't plan to use the resources you created, delete them, so you don't incur any charges:\n",
"\n",
"1. In the Azure portal, select **Resource groups** on the far left.\n",
"1. From the list, select the resource group you created.\n",
"1. Select **Delete resource group**.\n",
"1. Enter the resource group name. Then select **Delete**.\n",
"\n",
"You can also keep the resource group but delete a single workspace. Display the workspace properties and select **Delete**."
]
},
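{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you prefer the SDK, a minimal sketch of deleting the workspace from code (assuming you really do want to delete it - this is irreversible):\n",
"\n",
"```python\n",
"# deletes the workspace and, optionally, the resources it depends on\n",
"ws.delete(delete_dependent_resources=True, no_wait=False)\n",
"```"
]
},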
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next Steps\n",
"\n",
"In this tutorial, you have seen how to run your machine learning code on a fully managed, pre-configured cloud-based VM called a *compute instance*. Having a compute instance for your development environment removes the burden of installing data science tooling and libraries (for example, Jupyter, PyTorch, TensorFlow, Scikit) and allows you to easily scale up/down the compute power (RAM, cores) since the notebooks are separated from the VM. \n",
"\n",
"It is often the case that once you have your machine learning code working in a development environment that you want to productionize this by running as a **_job_** - ideally on a schedule or trigger (for example, arrival of new data). To this end, we recommend that you follow [**the day 1 getting started with Azure Machine Learning tutorial**](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup-local). This day 1 tutorial is focussed on running jobs-based machine learning code in the cloud."
]
}
],
"metadata": {
"authors": [
{
"name": "samkemp"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
},
"nteract": {
"version": "nteract-front-end@1.0.0"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -1,7 +0,0 @@
name: azureml-quickstart
dependencies:
- pip:
  - azureml-sdk
  - pytorch
  - torchvision
  - matplotlib

View File

@@ -1,22 +0,0 @@
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # two convolutional layers with max pooling, then three fully connected layers
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)  # flatten for the fully connected layers
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

View File

@@ -1,51 +0,0 @@
import os
import torch
import json
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def init():
    # runs once when the service starts: load the registered model into a global
    global net
    global classes
    model_filename = 'cifar_net.pth'
    model_path = os.path.join(os.environ['AZUREML_MODEL_DIR'], model_filename)
    net = Net()
    net.load_state_dict(torch.load(model_path))
    classes = ('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')


def run(data):
    # runs on every request: deserialize the payload, predict, and return labels
    data = json.loads(data)
    images = torch.FloatTensor(data['data'])
    outputs = net(images)
    _, predicted = torch.max(outputs, 1)
    result = [classes[predicted[j]] for j in range(4)]
    result_json = json.dumps({"predictions": result})
    # You can return any JSON-serializable object.
    return result_json