Compare commits

...

48 Commits

Author SHA1 Message Date
Roope Astala
f16bf27e26 Merge pull request #207 from rastala/master
release 1.0.15
2019-02-11 15:18:00 -05:00
Roope Astala
c7bec58593 update version 2019-02-11 15:17:40 -05:00
Roope Astala
cca3996eb4 release 1.0.15 2019-02-11 15:12:30 -05:00
Roope Astala
5fd14bac30 Merge pull request #199 from rastala/master
update automl databricks
2019-02-06 11:53:35 -05:00
Roope Astala
3fa409543b update automl databricks 2019-02-06 11:53:00 -05:00
Josée Martens
42f2822b61 Adding file to enable search performance tracking.
@rastala
2019-02-04 14:36:40 -06:00
Roope Astala
48afbe1cab Delete release.json 2019-01-31 16:07:08 -05:00
Roope Astala
1298c55dd4 Merge pull request #193 from rastala/master
fix broken link
2019-01-31 15:45:01 -05:00
Roope Astala
0aa1b248f4 fix broken link 2019-01-31 15:44:22 -05:00
Roope Astala
3012b8f5a8 Merge pull request #192 from rastala/master
add authentication notebook
2019-01-31 15:41:40 -05:00
Roope Astala
501c55bcaf add authentication notebook 2019-01-31 15:40:51 -05:00
hning86
1a38f50221 docker instructions 2019-01-31 15:16:36 -05:00
hning86
cc64be8d6f text update 2019-01-31 14:29:31 -05:00
hning86
a0127a2a64 dockerfile instruction 2019-01-31 11:46:06 -05:00
Hai Ning
7eb966bf79 Merge pull request #191 from Azure/dockerfiles
Dockerfiles
2019-01-31 10:54:55 -05:00
Roope Astala
9118f2c7ce Merge pull request #190 from rastala/master
fix NBSETUP
2019-01-31 09:33:17 -05:00
Roope Astala
0e3198f311 fix NBSETUP 2019-01-31 09:32:30 -05:00
hning86
0fdab91b97 dockefile reorg 2019-01-31 09:21:06 -05:00
hning86
b54be912d8 dockerfiles added 2019-01-30 17:04:18 -05:00
Roope Astala
3d0c7990ff Merge pull request #189 from rastala/master
update tutorial readme
2019-01-30 14:28:24 -05:00
Roope Astala
6e1ce29a94 Merge remote-tracking branch 'upstream/master' 2019-01-30 14:26:25 -05:00
Roope Astala
0d26c9986a update tutorials README 2019-01-30 14:25:17 -05:00
Roope Astala
0514eee64b Merge pull request #182 from rastala/master
version 1.0.10
2019-01-28 18:10:20 -05:00
Roope Astala
4b6e34fdc0 Update train-within-notebook.ipynb 2019-01-28 18:09:36 -05:00
Roope Astala
e01216d85b Update configuration.ipynb 2019-01-28 18:08:41 -05:00
Roope Astala
b00f75edd8 version 1.0.10 2019-01-28 15:30:17 -05:00
Hai Ning
06aba388c6 Update azure-ml-with-nvidia-rapids.ipynb 2019-01-24 10:09:31 -05:00
Roope Astala
3018461dfc Merge pull request #176 from rastala/master
update tutorials
2019-01-22 14:25:28 -05:00
Roope Astala
0d91f2d697 update tutorials 2019-01-22 14:24:31 -05:00
Roope Astala
a14cb635f0 Merge pull request #175 from rastala/master
RAPIDS sample
2019-01-22 13:44:55 -05:00
Roope Astala
88f6a966cc RAPIDS sample 2019-01-22 13:32:59 -05:00
Hai Ning
4f76a844c6 Update README.md 2019-01-18 01:18:44 -05:00
Hai Ning
c1573ff949 Update NBSETUP.md 2019-01-18 01:15:53 -05:00
Hai Ning
d1b18b3771 Update NBSETUP.md 2019-01-18 01:09:13 -05:00
Roope Astala
e1a948f4cd Merge pull request #168 from rastala/master
version 1.0.8
2019-01-14 12:14:02 -08:00
Roope Astala
3ca40c0817 version 1.0.8 2019-01-14 15:13:30 -05:00
Roope Astala
f724cb4d9b Merge pull request #166 from jeff-shepherd/master
Fixed broken links in tutorials
2019-01-08 12:01:50 -08:00
Jeff Shepherd
094b4b3b13 Fixed broken links in tutorials 2019-01-08 11:58:03 -08:00
Roope Astala
d09942f521 Merge pull request #165 from rastala/master
databricks update
2019-01-08 09:24:11 -08:00
Roope Astala
0c9e527174 databricks update 2019-01-08 12:23:15 -05:00
Roope Astala
e2640e54da Merge pull request #160 from rastala/master
Create aml-pipelines-concept.png
2019-01-02 12:03:13 -08:00
Roope Astala
d348baf8a1 Create aml-pipelines-concept.png 2019-01-02 15:02:25 -05:00
Roope Astala
b41e11e30d Merge pull request #159 from jeff-shepherd/master
Removed databricks notebook link
2019-01-02 11:56:15 -08:00
Jeff Shepherd
c1aa951867 Removed databricks notebook link 2019-01-02 11:45:52 -08:00
Roope Astala
5fe5f06e07 Merge pull request #158 from rastala/master
Create Databricks_AMLSDK_1-4_6.dbc
2019-01-02 10:52:24 -08:00
Roope Astala
e8a09c49b1 Create Databricks_AMLSDK_1-4_6.dbc 2019-01-02 13:51:29 -05:00
Roope Astala
fb6a73a790 Merge pull request #145 from rastala/master
fix databricks
2018-12-20 13:11:17 -08:00
Roope Astala
c2968b6526 fix databricks 2018-12-20 16:10:27 -05:00
99 changed files with 31249 additions and 27074 deletions

View File

@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11
# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git
# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6
# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]
# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.10"]
# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.10" --single-branch https://github.com/Azure/MachineLearningNotebooks.git
# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]
# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for production environments
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py
# open up port 8887 on the container
EXPOSE 8887
# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"

View File

@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11
# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git
# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6
# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]
# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.2"]
# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.2" --single-branch https://github.com/Azure/MachineLearningNotebooks.git
# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]
# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for production environments
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py
# open up port 8887 on the container
EXPOSE 8887
# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"

View File

@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11
# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git
# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6
# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]
# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.6"]
# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.6" --single-branch https://github.com/Azure/MachineLearningNotebooks.git
# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]
# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for production environments
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py
# open up port 8887 on the container
EXPOSE 8887
# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"

View File

@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11
# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git
# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6
# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]
# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.8"]
# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.8" --single-branch https://github.com/Azure/MachineLearningNotebooks.git
# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]
# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for production environments
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py
# open up port 8887 on the container
EXPOSE 8887
# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"

View File

@@ -1,10 +1,11 @@
# Setting up environment
---
To run the notebooks in this repository use one of the following options.
## **Option 1: Use Azure Notebooks**
Azure Notebooks is a hosted Jupyter-based notebook service in the Azure cloud. The Azure Machine Learning Python SDK is pre-installed in the Azure Notebooks `Python 3.6` kernel.
1. [![Azure Notebooks](https://notebooks.azure.com/launch.png)](https://aka.ms/aml-clone-azure-notebooks)
[Import sample notebooks](https://aka.ms/aml-clone-azure-notebooks) into Azure Notebooks
@@ -15,20 +16,91 @@ To run the notebooks in this repository use one of these methods:
![set kernel to Python 3.6](images/python36.png)
## **Option 2: Use your own notebook server**
### Quick installation
We recommend you create a Python virtual environment ([Miniconda](https://conda.io/miniconda.html) preferred but [virtualenv](https://virtualenv.pypa.io/en/latest/) works too) and install the SDK in it.
```sh
# install just the base SDK
pip install azureml-sdk
# clone the sample repository
git clone https://github.com/Azure/MachineLearningNotebooks.git
# below steps are optional
# install the base SDK and a Jupyter notebook server
pip install azureml-sdk[notebooks]
# install the data prep component
pip install azureml-dataprep
# install model explainability component
pip install azureml-sdk[explain]
# install automated ml components
pip install azureml-sdk[automl]
# install experimental features (not ready for production use)
pip install azureml-sdk[contrib]
```
Note that the _extras_ (the keywords inside the square brackets) can be combined. For example:
```sh
# install base SDK, Jupyter notebook and automated ml components
pip install azureml-sdk[notebooks,automl]
```
### Full instructions
[Install the Azure Machine Learning SDK](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python)
Please make sure you start with the [Configuration](configuration.ipynb) notebook to create and connect to a workspace.
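The configuration notebook essentially wraps a handful of SDK calls. A minimal sketch of the same flow (the workspace name, resource group, and region below are placeholders you would replace with your own values):
```python
from azureml.core import Workspace

# create the workspace if it does not exist yet, then cache its details locally
ws = Workspace.create(name="myworkspace",                 # placeholder values
                      subscription_id="<subscription-id>",
                      resource_group="myresourcegroup",
                      location="eastus2",
                      exist_ok=True)
ws.write_config()   # writes .azureml/config.json next to the notebooks

# later sessions can simply reload the cached configuration
ws = Workspace.from_config()
print(ws.name, ws.location, ws.resource_group, sep="\n")
```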
### Video walkthrough:
[![Get Started video](images/yt_cover.png)](https://youtu.be/VIsXeTuW3FU)
1. Setup a Jupyter Notebook server and [install the Azure Machine Learning SDK](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python)
1. Clone [this repository](https://aka.ms/aml-notebooks)
1. You may need to install other packages for specific notebooks
- For example, to run the Azure Machine Learning Data Prep notebooks, install the extra dataprep SDK:
```bash
pip install azureml-dataprep
```
1. Start your notebook server
1. Follow the instructions in the [Configuration](configuration.ipynb) notebook to create and connect to a workspace
1. Open one of the sample notebooks
## **Option 3: Use Docker**
You need to have the Docker engine installed locally and running. Open a command line window and type the following command.
__Note:__ We use version `1.0.10` below as an example, but you can replace that with any available version number you like.
```sh
# clone the sample repository
git clone https://github.com/Azure/MachineLearningNotebooks.git
# change current directory to the folder
# where the Dockerfile of the specific SDK version is located.
cd MachineLearningNotebooks/Dockerfiles/1.0.10
# build a Docker image with a name (azuremlsdk for example)
# and a version number tag (1.0.10 for example).
# this can take several minutes depending on your computer speed and network bandwidth.
docker build . -t azuremlsdk:1.0.10
# launch the built Docker container which also automatically starts
# a Jupyter server instance listening on port 8887 of the host machine
docker run -it -p 8887:8887 azuremlsdk:1.0.10
```
Now you can point your browser to http://localhost:8887. We recommend that you start from the `configuration.ipynb` notebook at the root directory.
If you need additional Azure ML SDK components, you can either modify the Dockerfiles to add the extra steps before you build the images, or install them from the command line in the running container after the image is built. For example:
```sh
# install dataprep components
pip install azureml-dataprep
# install the core SDK and automated ml components
pip install azureml-sdk[automl]
# install the core SDK and model explainability component
pip install azureml-sdk[explain]
# install the core SDK and experimental components
pip install azureml-sdk[contrib]
```

View File

@@ -1,40 +1,59 @@
# Azure Machine Learning service example notebooks
---
This repository contains example notebooks demonstrating the [Azure Machine Learning](https://azure.microsoft.com/en-us/services/machine-learning-service/) Python SDK
which allows you to build, train, deploy and manage machine learning solutions using Azure. The AML SDK
allows you the choice of using local or cloud compute resources, while managing
and maintaining the complete data science workflow from the cloud.
![Azure ML workflow](https://raw.githubusercontent.com/MicrosoftDocs/azure-docs/master/articles/machine-learning/service/media/overview-what-is-azure-ml/aml.png)
## Quick installation
```sh
pip install azureml-sdk
```
Read more detailed instructions on [how to set up your environment](./NBSETUP.md) using Azure Notebook service, your own Jupyter notebook server, or Docker.
## How to navigate and use the example notebooks?
You should always run the [Configuration](./configuration.ipynb) notebook first when setting up a notebook library on a new machine or in a new environment. It configures your notebook library to connect to an Azure Machine Learning workspace, and sets up your workspace and compute to be used by many of the other examples; a minimal sketch of that setup follows the list below.
If you want to...
* ...try out and explore Azure ML, start with the image classification tutorials: [part 1 training](./tutorials/img-classification-part1-training.ipynb) and [part 2 deployment](./tutorials/img-classification-part2-deploy.ipynb).
* ...learn about experimentation and tracking run history, first [train within Notebook](./how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb), then try [training on a remote VM](./how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb) and [using logging APIs](./how-to-use-azureml/training/logging-api/logging-api.ipynb).
* ...train deep learning models at scale, first learn about [Machine Learning Compute](./how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb), and then try [distributed hyperparameter tuning](./how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) and [distributed training](./how-to-use-azureml/training-with-deep-learning/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb).
* ...deploy models as a realtime scoring service, first learn the basics by [training within Notebook and deploying to Azure Container Instance](./how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb), then learn how to [register and manage models, and create Docker images](./how-to-use-azureml/deployment/register-model-create-image-deploy-service/register-model-create-image-deploy-service.ipynb), and [production deploy models on Azure Kubernetes Cluster](./how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb).
* ...deploy models as a batch scoring service, first [train a model within Notebook](./how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb), learn how to [register and manage models](./how-to-use-azureml/deployment/register-model-create-image-deploy-service/register-model-create-image-deploy-service.ipynb), then [create Machine Learning Compute for scoring compute](./how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb), and [use Machine Learning Pipelines to deploy your model](./how-to-use-azureml/machine-learning-pipelines/pipeline-mpi-batch-prediction.ipynb).
* ...monitor your deployed models, learn about using [App Insights](./how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb) and [model data collection](./how-to-use-azureml/deployment/enable-data-collection-for-models-in-aks/enable-data-collection-for-models-in-aks.ipynb).
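As a rough illustration of what the configuration notebook sets up, here is a minimal sketch of loading the workspace and provisioning an Azure Machine Learning Compute cluster (the cluster name and VM size are placeholder values):
```python
from azureml.core import Workspace
from azureml.core.compute import AmlCompute, ComputeTarget

ws = Workspace.from_config()        # reads the config written by configuration.ipynb

cluster_name = "cpucluster"         # placeholder name
if cluster_name in ws.compute_targets:
    compute = ws.compute_targets[cluster_name]      # reuse an existing cluster
else:
    config = AmlCompute.provisioning_configuration(vm_size="STANDARD_D2_V2",
                                                   min_nodes=0, max_nodes=4)
    compute = ComputeTarget.create(ws, cluster_name, config)
    compute.wait_for_completion(show_output=True)
```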
## Tutorials
The [Tutorials](./tutorials) folder contains notebooks for the tutorials described in the [Azure Machine Learning documentation](https://aka.ms/aml-docs)
## How to use Azure ML
The [How to use Azure ML](./how-to-use-azureml) folder contains specific examples demonstrating the features of the Azure Machine Learning SDK
- [Training](./how-to-use-azureml/training) - Examples of how to build models using Azure ML's logging and execution capabilities on local and remote compute targets
- [Training with Deep Learning](./how-to-use-azureml/training-with-deep-learning) - Examples demonstrating how to build deep learning models using estimators and parameter sweeps
- [Manage Azure ML Service](./how-to-use-azureml/manage-azureml-service) - Examples of how to perform tasks such as authenticating against the Azure ML service in different ways
- [Automated Machine Learning](./how-to-use-azureml/automated-machine-learning) - Examples using Automated Machine Learning to automatically generate optimal machine learning pipelines and models
- [Machine Learning Pipelines](./how-to-use-azureml/machine-learning-pipelines) - Examples showing how to create and use reusable pipelines for training and batch scoring
- [Deployment](./how-to-use-azureml/deployment) - Examples showing how to deploy and manage machine learning models and solutions
- [Azure Databricks](./how-to-use-azureml/azure-databricks) - Examples showing how to use Azure ML with Azure Databricks
---
## Documentation
* Quickstarts, end-to-end tutorials, and how-tos on the [official documentation site for Azure Machine Learning service](https://docs.microsoft.com/en-us/azure/machine-learning/service/).
* [Python SDK reference](https://docs.microsoft.com/en-us/python/api/overview/azure/ml/intro?view=azure-ml-py)
---
## Projects using Azure Machine Learning
Visit the following repos to see projects contributed by Azure ML users:
- [Fine tune natural language processing models using Azure Machine Learning service](https://github.com/Microsoft/AzureML-BERT)
- [Fashion MNIST with Azure ML SDK](https://github.com/amynic/azureml-sdk-fashion)

View File

@@ -96,7 +96,7 @@
"source": [ "source": [
"import azureml.core\n", "import azureml.core\n",
"\n", "\n",
"print(\"This notebook was created using version 1.0.6 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.0.15 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -0,0 +1,409 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# NVIDIA RAPIDS in Azure Machine Learning"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The [RAPIDS](https://www.developer.nvidia.com/rapids) suite of software libraries from NVIDIA enables the execution of end-to-end data science and analytics pipelines entirely on GPUs. In many machine learning projects, a significant portion of the model training time is spent in setting up the data; this stage of the process is known as Extraction, Transformation and Loading, or ETL. By using the DataFrame API for ETL and GPU-capable ML algorithms in RAPIDS, data preparation and training models can be done in GPU-accelerated end-to-end pipelines without incurring serialization costs between the pipeline stages. This notebook demonstrates how to use NVIDIA RAPIDS to prepare data and train model in Azure.\n",
" \n",
"In this notebook, we will do the following:\n",
" \n",
"* Create an Azure Machine Learning Workspace\n",
"* Create an AMLCompute target\n",
"* Use a script to process our data and train a model\n",
"* Obtain the data required to run this sample\n",
"* Create an AML run configuration to launch a machine learning job\n",
"* Run the script to prepare data for training and train the model\n",
" \n",
"Prerequisites:\n",
"* An Azure subscription to create a Machine Learning Workspace\n",
"* Familiarity with the Azure ML SDK (refer to [notebook samples](https://github.com/Azure/MachineLearningNotebooks))\n",
"* A Jupyter notebook environment with Azure Machine Learning SDK installed. Refer to instructions to [setup the environment](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#local)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Verify if Azure ML SDK is installed"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from azureml.core import Workspace, Experiment\n",
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"from azureml.data.data_reference import DataReference\n",
"from azureml.core.runconfig import RunConfiguration\n",
"from azureml.core import ScriptRunConfig\n",
"from azureml.widgets import RunDetails"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create Azure ML Workspace"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following step is optional if you already have a workspace. If you want to use an existing workspace, then\n",
"skip this workspace creation step and move on to the next step to load the workspace.\n",
" \n",
"<font color='red'>Important</font>: in the code cell below, be sure to set the correct values for the subscription_id, \n",
"resource_group, workspace_name, region before executing this code cell."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"subscription_id = os.environ.get(\"SUBSCRIPTION_ID\", \"<subscription_id>\")\n",
"resource_group = os.environ.get(\"RESOURCE_GROUP\", \"<resource_group>\")\n",
"workspace_name = os.environ.get(\"WORKSPACE_NAME\", \"<workspace_name>\")\n",
"workspace_region = os.environ.get(\"WORKSPACE_REGION\", \"<region>\")\n",
"\n",
"ws = Workspace.create(workspace_name, subscription_id=subscription_id, resource_group=resource_group, location=workspace_region)\n",
"\n",
"# write config to a local directory for future use\n",
"ws.write_config()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load existing Workspace"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"# if a locally-saved configuration file for the workspace is not available, use the following to load workspace\n",
"# ws = Workspace(subscription_id=subscription_id, resource_group=resource_group, workspace_name=workspace_name)\n",
"print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')\n",
"\n",
"scripts_folder = \"scripts_folder\"\n",
"\n",
"if not os.path.isdir(scripts_folder):\n",
" os.mkdir(scripts_folder)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create AML Compute Target"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Because NVIDIA RAPIDS requires P40 or V100 GPUs, the user needs to specify compute targets from one of [NC_v3](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv3-series), [NC_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv2-series), [ND](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#nd-series) or [ND_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ndv2-series-preview) virtual machine types in Azure; these are the families of virtual machines in Azure that are provisioned with these GPUs.\n",
" \n",
"Pick one of the supported VM SKUs based on the number of GPUs you want to use for ETL and training in RAPIDS.\n",
" \n",
"The script in this notebook is implemented for single-machine scenarios. An example supporting multiple nodes will be published later."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"gpu_cluster_name = \"gpucluster\"\n",
"\n",
"if gpu_cluster_name in ws.compute_targets:\n",
" gpu_cluster = ws.compute_targets[gpu_cluster_name]\n",
" if gpu_cluster and type(gpu_cluster) is AmlCompute:\n",
" print('found compute target. just use it. ' + gpu_cluster_name)\n",
"else:\n",
" print(\"creating new cluster\")\n",
" # vm_size parameter below could be modified to one of the RAPIDS-supported VM types\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"Standard_NC6s_v2\", min_nodes=1, max_nodes = 1)\n",
"\n",
" # create the cluster\n",
" gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, provisioning_config)\n",
" gpu_cluster.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Script to process data and train model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The _process&#95;data.py_ script used in the step below is a slightly modified implementation of [RAPIDS E2E example](https://github.com/rapidsai/notebooks/blob/master/mortgage/E2E.ipynb)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# copy process_data.py into the script folder\n",
"import shutil\n",
"shutil.copy('./process_data.py', os.path.join(scripts_folder, 'process_data.py'))\n",
"\n",
"with open(os.path.join(scripts_folder, './process_data.py'), 'r') as process_data_script:\n",
" print(process_data_script.read())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Data required to run this sample"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample uses [Fannie Mae\u00e2\u20ac\u2122s Single-Family Loan Performance Data](http://www.fanniemae.com/portal/funding-the-market/data/loan-performance-data.html). Refer to the 'Available mortgage datasets' section in [instructions](https://rapidsai.github.io/demos/datasets/mortgage-data) to get sample data.\n",
"\n",
"Once you obtain access to the data, you will need to make this data available in an [Azure Machine Learning Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data), for use in this sample."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<font color='red'>Important</font>: The following step assumes the data is uploaded to the Workspace's default data store under a folder named 'mortgagedata2000_01'. Note that uploading data to the Workspace's default data store is not necessary and the data can be referenced from any datastore, e.g., from Azure Blob or File service, once it is added as a datastore to the workspace. The path_on_datastore parameter needs to be updated, depending on where the data is available. The directory where the data is available should have the following folder structure, as the process_data.py script expects this directory structure:\n",
"* _&lt;data directory>_/acq\n",
"* _&lt;data directory>_/perf\n",
"* _names.csv_\n",
"\n",
"The 'acq' and 'perf' refer to directories containing data files. The _&lt;data directory>_ is the path specified in _path&#95;on&#95;datastore_ parameter in the step below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ds = ws.get_default_datastore()\n",
"\n",
"# download and uncompress data in a local directory before uploading to data store\n",
"# directory specified in src_dir parameter below should have the acq, perf directories with data and names.csv file\n",
"# ds.upload(src_dir='<local directory that has data>', target_path='mortgagedata2000_01', overwrite=True, show_progress=True)\n",
"\n",
"# data already uploaded to the datastore\n",
"data_ref = DataReference(data_reference_name='data', datastore=ds, path_on_datastore='mortgagedata2000_01')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create AML run configuration to launch a machine learning job"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"AML allows the option of using existing Docker images with prebuilt conda environments. The following step use an existing image from [Docker Hub](https://hub.docker.com/r/rapidsai/rapidsai/)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"run_config = RunConfiguration()\n",
"run_config.framework = 'python'\n",
"run_config.environment.python.user_managed_dependencies = True\n",
"# use conda environment named 'rapids' available in the Docker image\n",
"# this conda environment does not include azureml-defaults package that is required for using AML functionality like metrics tracking, model management etc.\n",
"run_config.environment.python.interpreter_path = '/conda/envs/rapids/bin/python'\n",
"run_config.target = gpu_cluster_name\n",
"run_config.environment.docker.enabled = True\n",
"run_config.environment.docker.gpu_support = True\n",
"# if registry is not mentioned the image is pulled from Docker Hub\n",
"run_config.environment.docker.base_image = \"rapidsai/rapidsai:cuda9.2_ubuntu16.04_root\"\n",
"run_config.environment.spark.precache_packages = False\n",
"run_config.data_references={'data':data_ref.to_config()}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Wrapper function to submit Azure Machine Learning experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# parameter cpu_predictor indicates if training should be done on CPU. If set to true, GPUs are used *only* for ETL and *not* for training\n",
"# parameter num_gpu indicates number of GPUs to use among the GPUs available in the VM for ETL and if cpu_predictor is false, for training as well \n",
"def run_rapids_experiment(cpu_training, gpu_count):\n",
" # any value between 1-4 is allowed here depending the type of VMs available in gpu_cluster\n",
" if gpu_count not in [1, 2, 3, 4]:\n",
" raise Exception('Value specified for the number of GPUs to use {0} is invalid'.format(gpu_count))\n",
"\n",
" # following data partition mapping is empirical (specific to GPUs used and current data partitioning scheme) and may need to be tweaked\n",
" gpu_count_data_partition_mapping = {1: 2, 2: 4, 3: 5, 4: 7}\n",
" part_count = gpu_count_data_partition_mapping[gpu_count]\n",
"\n",
" end_year = 2000\n",
" if gpu_count > 2:\n",
" end_year = 2001 # use more data with more GPUs\n",
"\n",
" src = ScriptRunConfig(source_directory=scripts_folder, \n",
" script='process_data.py', \n",
" arguments = ['--num_gpu', gpu_count, '--data_dir', str(data_ref),\n",
" '--part_count', part_count, '--end_year', end_year,\n",
" '--cpu_predictor', cpu_training\n",
" ],\n",
" run_config=run_config\n",
" )\n",
"\n",
" exp = Experiment(ws, 'rapidstest')\n",
" run = exp.submit(config=src)\n",
" RunDetails(run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit experiment (ETL & training on GPU)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cpu_predictor = False\n",
"# the value for num_gpu should be less than or equal to the number of GPUs available in the VM\n",
"num_gpu = 1 \n",
"# train using CPU, use GPU for both ETL and training\n",
"run_rapids_experiment(cpu_predictor, num_gpu)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit experiment (ETL on GPU, training on CPU)\n",
"\n",
"To observe performance difference between GPU-accelerated RAPIDS based training with CPU-only training, set 'cpu_predictor' predictor to 'True' and rerun the experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cpu_predictor = True\n",
"# the value for num_gpu should be less than or equal to the number of GPUs available in the VM\n",
"num_gpu = 1\n",
"# train using CPU, use GPU for ETL\n",
"run_rapids_experiment(cpu_predictor, num_gpu)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Delete cluster"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# delete the cluster\n",
"# gpu_cluster.delete()"
]
}
],
"metadata": {
"authors": [
{
"name": "ksivas"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
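The wrapper above only renders the run widget; if you also want to block until the run finishes and inspect it afterwards, a small sketch using the standard Run API (this assumes run_rapids_experiment is changed to return the submitted run):
```python
# assumes the wrapper ends with `return run` instead of only showing the widget
run = run_rapids_experiment(cpu_training=False, gpu_count=1)
run.wait_for_completion(show_output=True)   # stream logs until the run completes
print("Status:", run.get_status())
print("Details:", run.get_portal_url())
```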

View File

@@ -0,0 +1,500 @@
# License Info: https://github.com/rapidsai/notebooks/blob/master/LICENSE
import numpy as np
import datetime
import dask_xgboost as dxgb_gpu
import dask
import dask_cudf
from dask.delayed import delayed
from dask.distributed import Client, wait
import xgboost as xgb
import cudf
from cudf.dataframe import DataFrame
from collections import OrderedDict
import gc
from glob import glob
import os
import argparse
parser = argparse.ArgumentParser("rapidssample")
parser.add_argument("--data_dir", type=str, help="location of data")
parser.add_argument("--num_gpu", type=int, help="Number of GPUs to use", default=1)
parser.add_argument("--part_count", type=int, help="Number of data files to train against", default=2)
parser.add_argument("--end_year", type=int, help="Year to end the data load", default=2000)
parser.add_argument("--cpu_predictor", type=str, help="Flag to use CPU for prediction", default='False')
parser.add_argument('-f', type=str, default='') # added for notebook execution scenarios
args = parser.parse_args()
data_dir = args.data_dir
num_gpu = args.num_gpu
part_count = args.part_count
end_year = args.end_year
cpu_predictor = args.cpu_predictor.lower() in ('yes', 'true', 't', 'y', '1')
print('data_dir = {0}'.format(data_dir))
print('num_gpu = {0}'.format(num_gpu))
print('part_count = {0}'.format(part_count))
part_count = part_count + 1 # adding one because the usage below is not inclusive
print('end_year = {0}'.format(end_year))
print('cpu_predictor = {0}'.format(cpu_predictor))
import subprocess
cmd = "hostname --all-ip-addresses"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
IPADDR = str(output.decode()).split()[0]
print('IPADDR is {0}'.format(IPADDR))
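# set up a local Dask cluster using the dask-setup.sh helper bundled in the RAPIDS image:
# the first call clears any existing Dask processes, the second starts a scheduler and num_gpu workers on this node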
cmd = "/rapids/notebooks/utils/dask-setup.sh 0"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
cmd = "/rapids/notebooks/utils/dask-setup.sh rapids " + str(num_gpu) + " 8786 8787 8790 " + str(IPADDR) + " MASTER"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print(output.decode())
import dask
from dask.delayed import delayed
from dask.distributed import Client, wait
_client = IPADDR + str(":8786")
client = dask.distributed.Client(_client)
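# helpers executed on each Dask worker to switch the RAPIDS memory manager (RMM) between pooled and default GPU allocation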
def initialize_rmm_pool():
from librmm_cffi import librmm_config as rmm_cfg
rmm_cfg.use_pool_allocator = True
#rmm_cfg.initial_pool_size = 2<<30 # set to 2GiB. Default is 1/2 total GPU memory
import cudf
return cudf._gdf.rmm_initialize()
def initialize_rmm_no_pool():
from librmm_cffi import librmm_config as rmm_cfg
rmm_cfg.use_pool_allocator = False
import cudf
return cudf._gdf.rmm_initialize()
def run_dask_task(func, **kwargs):
task = func(**kwargs)
return task
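# schedule the full ETL for a single quarter (run_gpu_workflow) on the Dask cluster as one delayed task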
def process_quarter_gpu(year=2000, quarter=1, perf_file=""):
ml_arrays = run_dask_task(delayed(run_gpu_workflow),
quarter=quarter,
year=year,
perf_file=perf_file)
return client.compute(ml_arrays,
optimize_graph=False,
fifo_timeout="0ms"
)
def null_workaround(df, **kwargs):
for column, data_type in df.dtypes.items():
if str(data_type) == "category":
df[column] = df[column].astype('int32').fillna(-1)
if str(data_type) in ['int8', 'int16', 'int32', 'int64', 'float32', 'float64']:
df[column] = df[column].fillna(-1)
return df
def run_gpu_workflow(quarter=1, year=2000, perf_file="", **kwargs):
names = gpu_load_names()
acq_gdf = gpu_load_acquisition_csv(acquisition_path= acq_data_path + "/Acquisition_"
+ str(year) + "Q" + str(quarter) + ".txt")
acq_gdf = acq_gdf.merge(names, how='left', on=['seller_name'])
acq_gdf.drop_column('seller_name')
acq_gdf['seller_name'] = acq_gdf['new']
acq_gdf.drop_column('new')
perf_df_tmp = gpu_load_performance_csv(perf_file)
gdf = perf_df_tmp
everdf = create_ever_features(gdf)
delinq_merge = create_delinq_features(gdf)
everdf = join_ever_delinq_features(everdf, delinq_merge)
del(delinq_merge)
joined_df = create_joined_df(gdf, everdf)
testdf = create_12_mon_features(joined_df)
joined_df = combine_joined_12_mon(joined_df, testdf)
del(testdf)
perf_df = final_performance_delinquency(gdf, joined_df)
del(gdf, joined_df)
final_gdf = join_perf_acq_gdfs(perf_df, acq_gdf)
del(perf_df)
del(acq_gdf)
final_gdf = last_mile_cleaning(final_gdf)
return final_gdf
def gpu_load_performance_csv(performance_path, **kwargs):
""" Loads performance data
Returns
-------
GPU DataFrame
"""
cols = [
"loan_id", "monthly_reporting_period", "servicer", "interest_rate", "current_actual_upb",
"loan_age", "remaining_months_to_legal_maturity", "adj_remaining_months_to_maturity",
"maturity_date", "msa", "current_loan_delinquency_status", "mod_flag", "zero_balance_code",
"zero_balance_effective_date", "last_paid_installment_date", "foreclosed_after",
"disposition_date", "foreclosure_costs", "prop_preservation_and_repair_costs",
"asset_recovery_costs", "misc_holding_expenses", "holding_taxes", "net_sale_proceeds",
"credit_enhancement_proceeds", "repurchase_make_whole_proceeds", "other_foreclosure_proceeds",
"non_interest_bearing_upb", "principal_forgiveness_upb", "repurchase_make_whole_proceeds_flag",
"foreclosure_principal_write_off_amount", "servicing_activity_indicator"
]
dtypes = OrderedDict([
("loan_id", "int64"),
("monthly_reporting_period", "date"),
("servicer", "category"),
("interest_rate", "float64"),
("current_actual_upb", "float64"),
("loan_age", "float64"),
("remaining_months_to_legal_maturity", "float64"),
("adj_remaining_months_to_maturity", "float64"),
("maturity_date", "date"),
("msa", "float64"),
("current_loan_delinquency_status", "int32"),
("mod_flag", "category"),
("zero_balance_code", "category"),
("zero_balance_effective_date", "date"),
("last_paid_installment_date", "date"),
("foreclosed_after", "date"),
("disposition_date", "date"),
("foreclosure_costs", "float64"),
("prop_preservation_and_repair_costs", "float64"),
("asset_recovery_costs", "float64"),
("misc_holding_expenses", "float64"),
("holding_taxes", "float64"),
("net_sale_proceeds", "float64"),
("credit_enhancement_proceeds", "float64"),
("repurchase_make_whole_proceeds", "float64"),
("other_foreclosure_proceeds", "float64"),
("non_interest_bearing_upb", "float64"),
("principal_forgiveness_upb", "float64"),
("repurchase_make_whole_proceeds_flag", "category"),
("foreclosure_principal_write_off_amount", "float64"),
("servicing_activity_indicator", "category")
])
print(performance_path)
return cudf.read_csv(performance_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)
def gpu_load_acquisition_csv(acquisition_path, **kwargs):
""" Loads acquisition data
Returns
-------
GPU DataFrame
"""
cols = [
'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',
'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',
'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',
'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',
'relocation_mortgage_indicator'
]
dtypes = OrderedDict([
("loan_id", "int64"),
("orig_channel", "category"),
("seller_name", "category"),
("orig_interest_rate", "float64"),
("orig_upb", "int64"),
("orig_loan_term", "int64"),
("orig_date", "date"),
("first_pay_date", "date"),
("orig_ltv", "float64"),
("orig_cltv", "float64"),
("num_borrowers", "float64"),
("dti", "float64"),
("borrower_credit_score", "float64"),
("first_home_buyer", "category"),
("loan_purpose", "category"),
("property_type", "category"),
("num_units", "int64"),
("occupancy_status", "category"),
("property_state", "category"),
("zip", "int64"),
("mortgage_insurance_percent", "float64"),
("product_type", "category"),
("coborrow_credit_score", "float64"),
("mortgage_insurance_type", "float64"),
("relocation_mortgage_indicator", "category")
])
print(acquisition_path)
return cudf.read_csv(acquisition_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)
def gpu_load_names(**kwargs):
""" Loads names used for renaming the banks
Returns
-------
GPU DataFrame
"""
cols = [
'seller_name', 'new'
]
dtypes = OrderedDict([
("seller_name", "category"),
("new", "category"),
])
return cudf.read_csv(col_names_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)
def create_ever_features(gdf, **kwargs):
everdf = gdf[['loan_id', 'current_loan_delinquency_status']]
everdf = everdf.groupby('loan_id', method='hash').max()
del(gdf)
everdf['ever_30'] = (everdf['max_current_loan_delinquency_status'] >= 1).astype('int8')
everdf['ever_90'] = (everdf['max_current_loan_delinquency_status'] >= 3).astype('int8')
everdf['ever_180'] = (everdf['max_current_loan_delinquency_status'] >= 6).astype('int8')
everdf.drop_column('max_current_loan_delinquency_status')
return everdf
def create_delinq_features(gdf, **kwargs):
delinq_gdf = gdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status']]
del(gdf)
delinq_30 = delinq_gdf.query('current_loan_delinquency_status >= 1')[['loan_id', 'monthly_reporting_period']].groupby('loan_id', method='hash').min()
delinq_30['delinquency_30'] = delinq_30['min_monthly_reporting_period']
delinq_30.drop_column('min_monthly_reporting_period')
delinq_90 = delinq_gdf.query('current_loan_delinquency_status >= 3')[['loan_id', 'monthly_reporting_period']].groupby('loan_id', method='hash').min()
delinq_90['delinquency_90'] = delinq_90['min_monthly_reporting_period']
delinq_90.drop_column('min_monthly_reporting_period')
delinq_180 = delinq_gdf.query('current_loan_delinquency_status >= 6')[['loan_id', 'monthly_reporting_period']].groupby('loan_id', method='hash').min()
delinq_180['delinquency_180'] = delinq_180['min_monthly_reporting_period']
delinq_180.drop_column('min_monthly_reporting_period')
del(delinq_gdf)
delinq_merge = delinq_30.merge(delinq_90, how='left', on=['loan_id'], type='hash')
delinq_merge['delinquency_90'] = delinq_merge['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
delinq_merge = delinq_merge.merge(delinq_180, how='left', on=['loan_id'], type='hash')
delinq_merge['delinquency_180'] = delinq_merge['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
del(delinq_30)
del(delinq_90)
del(delinq_180)
return delinq_merge
def join_ever_delinq_features(everdf_tmp, delinq_merge, **kwargs):
everdf = everdf_tmp.merge(delinq_merge, on=['loan_id'], how='left', type='hash')
del(everdf_tmp)
del(delinq_merge)
everdf['delinquency_30'] = everdf['delinquency_30'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
everdf['delinquency_90'] = everdf['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
everdf['delinquency_180'] = everdf['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
return everdf
def create_joined_df(gdf, everdf, **kwargs):
test = gdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status', 'current_actual_upb']]
del(gdf)
test['timestamp'] = test['monthly_reporting_period']
test.drop_column('monthly_reporting_period')
test['timestamp_month'] = test['timestamp'].dt.month
test['timestamp_year'] = test['timestamp'].dt.year
test['delinquency_12'] = test['current_loan_delinquency_status']
test.drop_column('current_loan_delinquency_status')
test['upb_12'] = test['current_actual_upb']
test.drop_column('current_actual_upb')
test['upb_12'] = test['upb_12'].fillna(999999999)
test['delinquency_12'] = test['delinquency_12'].fillna(-1)
joined_df = test.merge(everdf, how='left', on=['loan_id'], type='hash')
del(everdf)
del(test)
joined_df['ever_30'] = joined_df['ever_30'].fillna(-1)
joined_df['ever_90'] = joined_df['ever_90'].fillna(-1)
joined_df['ever_180'] = joined_df['ever_180'].fillna(-1)
joined_df['delinquency_30'] = joined_df['delinquency_30'].fillna(-1)
joined_df['delinquency_90'] = joined_df['delinquency_90'].fillna(-1)
joined_df['delinquency_180'] = joined_df['delinquency_180'].fillna(-1)
joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int32')
joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int32')
return joined_df
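# build the delinquency_12 label by rolling loans up into 12-month windows and keeping the worst delinquency status and lowest unpaid balance per window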
def create_12_mon_features(joined_df, **kwargs):
testdfs = []
n_months = 12
for y in range(1, n_months + 1):
tmpdf = joined_df[['loan_id', 'timestamp_year', 'timestamp_month', 'delinquency_12', 'upb_12']]
tmpdf['josh_months'] = tmpdf['timestamp_year'] * 12 + tmpdf['timestamp_month']
tmpdf['josh_mody_n'] = ((tmpdf['josh_months'].astype('float64') - 24000 - y) / 12).floor()
tmpdf = tmpdf.groupby(['loan_id', 'josh_mody_n'], method='hash').agg({'delinquency_12': 'max','upb_12': 'min'})
tmpdf['delinquency_12'] = (tmpdf['max_delinquency_12']>3).astype('int32')
tmpdf['delinquency_12'] +=(tmpdf['min_upb_12']==0).astype('int32')
tmpdf.drop_column('max_delinquency_12')
tmpdf['upb_12'] = tmpdf['min_upb_12']
tmpdf.drop_column('min_upb_12')
tmpdf['timestamp_year'] = (((tmpdf['josh_mody_n'] * n_months) + 24000 + (y - 1)) / 12).floor().astype('int16')
tmpdf['timestamp_month'] = np.int8(y)
tmpdf.drop_column('josh_mody_n')
testdfs.append(tmpdf)
del(tmpdf)
del(joined_df)
return cudf.concat(testdfs)
def combine_joined_12_mon(joined_df, testdf, **kwargs):
joined_df.drop_column('delinquency_12')
joined_df.drop_column('upb_12')
joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int16')
joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int8')
return joined_df.merge(testdf, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month'], type='hash')
def final_performance_delinquency(gdf, joined_df, **kwargs):
merged = null_workaround(gdf)
joined_df = null_workaround(joined_df)
merged['timestamp_month'] = merged['monthly_reporting_period'].dt.month
merged['timestamp_month'] = merged['timestamp_month'].astype('int8')
merged['timestamp_year'] = merged['monthly_reporting_period'].dt.year
merged['timestamp_year'] = merged['timestamp_year'].astype('int16')
merged = merged.merge(joined_df, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month'], type='hash')
merged.drop_column('timestamp_year')
merged.drop_column('timestamp_month')
return merged
def join_perf_acq_gdfs(perf, acq, **kwargs):
perf = null_workaround(perf)
acq = null_workaround(acq)
return perf.merge(acq, how='left', on=['loan_id'], type='hash')
def last_mile_cleaning(df, **kwargs):
drop_list = [
'loan_id', 'orig_date', 'first_pay_date', 'seller_name',
'monthly_reporting_period', 'last_paid_installment_date', 'maturity_date', 'ever_30', 'ever_90', 'ever_180',
'delinquency_30', 'delinquency_90', 'delinquency_180', 'upb_12',
'zero_balance_effective_date','foreclosed_after', 'disposition_date','timestamp'
]
for column in drop_list:
df.drop_column(column)
for col, dtype in df.dtypes.iteritems():
if str(dtype)=='category':
df[col] = df[col].cat.codes
df[col] = df[col].astype('float32')
df['delinquency_12'] = df['delinquency_12'] > 0
df['delinquency_12'] = df['delinquency_12'].fillna(False).astype('int32')
for column in df.columns:
df[column] = df[column].fillna(-1)
return df.to_arrow(index=False)
# to download data for this notebook, visit https://rapidsai.github.io/demos/datasets/mortgage-data and update the following paths accordingly
acq_data_path = "{0}/acq".format(data_dir) #"/rapids/data/mortgage/acq"
perf_data_path = "{0}/perf".format(data_dir) #"/rapids/data/mortgage/perf"
col_names_path = "{0}/names.csv".format(data_dir) # "/rapids/data/mortgage/names.csv"
start_year = 2000
#end_year = 2000 # end_year is inclusive -- converted to parameter
#part_count = 2 # the number of data files to train against -- converted to parameter
client.run(initialize_rmm_pool)
# NOTE: The ETL calculates additional features which are then dropped before creating the XGBoost DMatrix.
# This can be optimized to avoid calculating the dropped features.
print("Reading ...")
t1 = datetime.datetime.now()
gpu_dfs = []
gpu_time = 0
quarter = 1
year = start_year
count = 0
while year <= end_year:
for file in glob(os.path.join(perf_data_path + "/Performance_" + str(year) + "Q" + str(quarter) + "*")):
if count < part_count:
gpu_dfs.append(process_quarter_gpu(year=year, quarter=quarter, perf_file=file))
count += 1
print('file: {0}'.format(file))
print('count: {0}'.format(count))
quarter += 1
if quarter == 5:
year += 1
quarter = 1
wait(gpu_dfs)
t2 = datetime.datetime.now()
print("Reading time ...")
print(t2-t1)
print('len(gpu_dfs) is {0}'.format(len(gpu_dfs)))
client.run(cudf._gdf.rmm_finalize)
client.run(initialize_rmm_no_pool)
dxgb_gpu_params = {
'nround': 100,
'max_depth': 8,
'max_leaves': 2**8,
'alpha': 0.9,
'eta': 0.1,
'gamma': 0.1,
'learning_rate': 0.1,
'subsample': 1,
'reg_lambda': 1,
'scale_pos_weight': 2,
'min_child_weight': 30,
'tree_method': 'gpu_hist',
'n_gpus': 1,
'distributed_dask': True,
'loss': 'ls',
'objective': 'gpu:reg:linear',
'max_features': 'auto',
'criterion': 'friedman_mse',
'grow_policy': 'lossguide',
'verbose': True
}
if cpu_predictor:
print('Training using CPUs')
dxgb_gpu_params['predictor'] = 'cpu_predictor'
dxgb_gpu_params['tree_method'] = 'hist'
dxgb_gpu_params['objective'] = 'reg:linear'
else:
print('Training using GPUs')
print('Training parameters are {0}'.format(dxgb_gpu_params))
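# convert the Arrow tables returned by the ETL back into cuDF DataFrames (lazily, on the workers)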
gpu_dfs = [delayed(DataFrame.from_arrow)(gpu_df) for gpu_df in gpu_dfs[:part_count]]
gpu_dfs = [gpu_df for gpu_df in gpu_dfs]
wait(gpu_dfs)
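# group the delayed frames by the worker that holds them so each worker concatenates only its local partitions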
tmp_map = [(gpu_df, list(client.who_has(gpu_df).values())[0]) for gpu_df in gpu_dfs]
new_map = {}
for key, value in tmp_map:
if value not in new_map:
new_map[value] = [key]
else:
new_map[value].append(key)
del(tmp_map)
gpu_dfs = []
for list_delayed in new_map.values():
gpu_dfs.append(delayed(cudf.concat)(list_delayed))
del(new_map)
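# split each frame into the delinquency_12 label and the remaining feature columns, then build one XGBoost DMatrix per worker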
gpu_dfs = [(gpu_df[['delinquency_12']], gpu_df[delayed(list)(gpu_df.columns.difference(['delinquency_12']))]) for gpu_df in gpu_dfs]
gpu_dfs = [(gpu_df[0].persist(), gpu_df[1].persist()) for gpu_df in gpu_dfs]
gpu_dfs = [dask.delayed(xgb.DMatrix)(gpu_df[1], gpu_df[0]) for gpu_df in gpu_dfs]
gpu_dfs = [gpu_df.persist() for gpu_df in gpu_dfs]
gc.collect()
labels = None
print('str(gpu_dfs) is {0}'.format(str(gpu_dfs)))
wait(gpu_dfs)
t1 = datetime.datetime.now()
bst = dxgb_gpu.train(client, dxgb_gpu_params, gpu_dfs, labels, num_boost_round=dxgb_gpu_params['nround'])
t2 = datetime.datetime.now()
print("Training time ...")
print(t2-t1)
print('str(bst) is {0}'.format(str(bst)))
print('Exiting script')

View File

@@ -0,0 +1 @@
google-site-verification: googleade5d7141b3f2910.html

View File

@@ -35,7 +35,7 @@ Below are the three execution environments supported by AutoML.
**NOTE**: You should at least have contributor access to your Azure subscription to run the notebook. **NOTE**: You should at least have contributor access to your Azure subscription to run the notebook.
- Please remove the previous SDK version, if any, and install the latest SDK by installing **azureml-sdk[automl_databricks]** as a PyPI library in the Azure Databricks workspace. - Please remove the previous SDK version, if any, and install the latest SDK by installing **azureml-sdk[automl_databricks]** as a PyPI library in the Azure Databricks workspace.
- You can find the detailed Readme instructions at [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks). - You can find the detailed Readme instructions at [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks).
- Download the sample notebook AutoML_Databricks_local_06.ipynb from [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks) and import it into the Azure Databricks workspace. - Download the sample notebook automl-databricks-local-01.ipynb from [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks) and import it into the Azure Databricks workspace.
- Attach the notebook to the cluster. - Attach the notebook to the cluster.
<a name="localconda"></a> <a name="localconda"></a>
@@ -169,16 +169,15 @@ bash automl_setup_linux.sh
- How to specify sample_weight - How to specify sample_weight
- The difference that it makes to test results - The difference that it makes to test results
- [auto-ml-subsampling-local.ipynb](subsampling/auto-ml-subsampling-local.ipynb)
- How to enable subsampling
- [auto-ml-dataprep.ipynb](dataprep/auto-ml-dataprep.ipynb) - [auto-ml-dataprep.ipynb](dataprep/auto-ml-dataprep.ipynb)
- Using DataPrep for reading data - Using DataPrep for reading data
- [auto-ml-dataprep-remote-execution.ipynb](dataprep-remote-execution/auto-ml-dataprep-remote-execution.ipynb) - [auto-ml-dataprep-remote-execution.ipynb](dataprep-remote-execution/auto-ml-dataprep-remote-execution.ipynb)
- Using DataPrep for reading data with remote execution - Using DataPrep for reading data with remote execution
- [auto-ml-classification-local-azuredatabricks.ipynb](classification-local-azuredatabricks/auto-ml-classification-local-azuredatabricks.ipynb)
- Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
- Example of using AutoML for classification using Azure Databricks as the platform for training
- [auto-ml-classification-with-whitelisting.ipynb](classification-with-whitelisting/auto-ml-classification-with-whitelisting.ipynb) - [auto-ml-classification-with-whitelisting.ipynb](classification-with-whitelisting/auto-ml-classification-with-whitelisting.ipynb)
- Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits) - Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
- Simple example of using Auto ML for classification with whitelisting tensorflow models. - Simple example of using Auto ML for classification with whitelisting tensorflow models.

View File

@@ -13,19 +13,7 @@ dependencies:
- pandas>=0.22.0,<0.23.0 - pandas>=0.22.0,<0.23.0
- tensorflow>=1.12.0 - tensorflow>=1.12.0
# Required for azuremlftk
- dill
- pyodbc
- statsmodels
- numexpr
- keras
- distributed>=1.21.5,<1.24
- pip: - pip:
# Required for azuremlftk
- https://azuremlpackages.blob.core.windows.net/forecasting/azuremlftk-0.1.18323.5a1-py3-none-any.whl
# Required packages for AzureML execution, history, and data preparation. # Required packages for AzureML execution, history, and data preparation.
- azureml-sdk[automl,notebooks,explain] - azureml-sdk[automl,notebooks,explain]
- pandas_ml - pandas_ml

View File

@@ -13,19 +13,7 @@ dependencies:
- pandas>=0.22.0,<0.23.0 - pandas>=0.22.0,<0.23.0
- tensorflow>=1.12.0 - tensorflow>=1.12.0
# Required for azuremlftk
- dill
- pyodbc
- statsmodels
- numexpr
- keras
- distributed>=1.21.5,<1.24
- pip: - pip:
# Required for azuremlftk
- https://azuremlpackages.blob.core.windows.net/forecasting/azuremlftk-0.1.18323.5a1-py3-none-any.whl
# Required packages for AzureML execution, history, and data preparation. # Required packages for AzureML execution, history, and data preparation.
- azureml-sdk[automl,notebooks,explain] - azureml-sdk[automl,notebooks,explain]
- pandas_ml - pandas_ml

View File

@@ -1,6 +1,7 @@
@echo off @echo off
set conda_env_name=%1 set conda_env_name=%1
set automl_env_file=%2 set automl_env_file=%2
set options=%3
set PIP_NO_WARN_SCRIPT_LOCATION=0 set PIP_NO_WARN_SCRIPT_LOCATION=0
IF "%conda_env_name%"=="" SET conda_env_name="azure_automl" IF "%conda_env_name%"=="" SET conda_env_name="azure_automl"
@@ -23,15 +24,21 @@ if errorlevel 1 goto ErrorExit
call python -m ipykernel install --user --name %conda_env_name% --display-name "Python (%conda_env_name%)" call python -m ipykernel install --user --name %conda_env_name% --display-name "Python (%conda_env_name%)"
REM azureml.widgets is now installed as part of the pip install under the conda env.
REM Removing the old user install so that the notebooks will use the latest widget.
call jupyter nbextension uninstall --user --py azureml.widgets
echo. echo.
echo. echo.
echo *************************************** echo ***************************************
echo * AutoML setup completed successfully * echo * AutoML setup completed successfully *
echo *************************************** echo ***************************************
echo. IF NOT "%options%"=="nolaunch" (
echo Starting jupyter notebook - please run the configuration notebook echo.
echo. echo Starting jupyter notebook - please run the configuration notebook
jupyter notebook --log-level=50 --notebook-dir='..\..' echo.
jupyter notebook --log-level=50 --notebook-dir='..\..'
)
goto End goto End

View File

@@ -2,6 +2,7 @@
CONDA_ENV_NAME=$1 CONDA_ENV_NAME=$1
AUTOML_ENV_FILE=$2 AUTOML_ENV_FILE=$2
OPTIONS=$3
PIP_NO_WARN_SCRIPT_LOCATION=0 PIP_NO_WARN_SCRIPT_LOCATION=0
if [ "$CONDA_ENV_NAME" == "" ] if [ "$CONDA_ENV_NAME" == "" ]
@@ -22,20 +23,25 @@ fi
if source activate $CONDA_ENV_NAME 2> /dev/null if source activate $CONDA_ENV_NAME 2> /dev/null
then then
echo "Upgrading azureml-sdk[automl,notebooks,explain] in existing conda environment" $CONDA_ENV_NAME echo "Upgrading azureml-sdk[automl,notebooks,explain] in existing conda environment" $CONDA_ENV_NAME
pip install --upgrade azureml-sdk[automl,notebooks,explain] pip install --upgrade azureml-sdk[automl,notebooks,explain] &&
jupyter nbextension uninstall --user --py azureml.widgets
else else
conda env create -f $AUTOML_ENV_FILE -n $CONDA_ENV_NAME && conda env create -f $AUTOML_ENV_FILE -n $CONDA_ENV_NAME &&
source activate $CONDA_ENV_NAME && source activate $CONDA_ENV_NAME &&
python -m ipykernel install --user --name $CONDA_ENV_NAME --display-name "Python ($CONDA_ENV_NAME)" && python -m ipykernel install --user --name $CONDA_ENV_NAME --display-name "Python ($CONDA_ENV_NAME)" &&
jupyter nbextension uninstall --user --py azureml.widgets &&
echo "" && echo "" &&
echo "" && echo "" &&
echo "***************************************" && echo "***************************************" &&
echo "* AutoML setup completed successfully *" && echo "* AutoML setup completed successfully *" &&
echo "***************************************" && echo "***************************************" &&
if [ "$OPTIONS" != "nolaunch" ]
then
echo "" && echo "" &&
echo "Starting jupyter notebook - please run the configuration notebook" && echo "Starting jupyter notebook - please run the configuration notebook" &&
echo "" && echo "" &&
jupyter notebook --log-level=50 --notebook-dir '../..' jupyter notebook --log-level=50 --notebook-dir '../..'
fi
fi fi
if [ $? -gt 0 ] if [ $? -gt 0 ]

View File

@@ -2,6 +2,7 @@
CONDA_ENV_NAME=$1 CONDA_ENV_NAME=$1
AUTOML_ENV_FILE=$2 AUTOML_ENV_FILE=$2
OPTIONS=$3
PIP_NO_WARN_SCRIPT_LOCATION=0 PIP_NO_WARN_SCRIPT_LOCATION=0
if [ "$CONDA_ENV_NAME" == "" ] if [ "$CONDA_ENV_NAME" == "" ]
@@ -22,22 +23,27 @@ fi
if source activate $CONDA_ENV_NAME 2> /dev/null if source activate $CONDA_ENV_NAME 2> /dev/null
then then
echo "Upgrading azureml-sdk[automl,notebooks,explain] in existing conda environment" $CONDA_ENV_NAME echo "Upgrading azureml-sdk[automl,notebooks,explain] in existing conda environment" $CONDA_ENV_NAME
pip install --upgrade azureml-sdk[automl,notebooks,explain] pip install --upgrade azureml-sdk[automl,notebooks,explain] &&
jupyter nbextension uninstall --user --py azureml.widgets
else else
conda env create -f $AUTOML_ENV_FILE -n $CONDA_ENV_NAME && conda env create -f $AUTOML_ENV_FILE -n $CONDA_ENV_NAME &&
source activate $CONDA_ENV_NAME && source activate $CONDA_ENV_NAME &&
conda install lightgbm -c conda-forge -y && conda install lightgbm -c conda-forge -y &&
python -m ipykernel install --user --name $CONDA_ENV_NAME --display-name "Python ($CONDA_ENV_NAME)" && python -m ipykernel install --user --name $CONDA_ENV_NAME --display-name "Python ($CONDA_ENV_NAME)" &&
pip install numpy==1.15.3 jupyter nbextension uninstall --user --py azureml.widgets &&
pip install numpy==1.15.3 &&
echo "" && echo "" &&
echo "" && echo "" &&
echo "***************************************" && echo "***************************************" &&
echo "* AutoML setup completed successfully *" && echo "* AutoML setup completed successfully *" &&
echo "***************************************" && echo "***************************************" &&
if [ "$OPTIONS" != "nolaunch" ]
then
echo "" && echo "" &&
echo "Starting jupyter notebook - please run the configuration notebook" && echo "Starting jupyter notebook - please run the configuration notebook" &&
echo "" && echo "" &&
jupyter notebook --log-level=50 --notebook-dir '../..' jupyter notebook --log-level=50 --notebook-dir '../..'
fi
fi fi
if [ $? -gt 0 ] if [ $? -gt 0 ]

View File

@@ -62,11 +62,8 @@
"source": [ "source": [
"import json\n", "import json\n",
"import logging\n", "import logging\n",
"import os\n",
"import random\n",
"\n", "\n",
"from matplotlib import pyplot as plt\n", "from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import numpy as np\n", "import numpy as np\n",
"import pandas as pd\n", "import pandas as pd\n",
"from sklearn import datasets\n", "from sklearn import datasets\n",
@@ -102,7 +99,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n", "output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data=output, index=['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {
@@ -228,7 +226,8 @@
"description = 'AutoML Model'\n", "description = 'AutoML Model'\n",
"tags = None\n", "tags = None\n",
"model = local_run.register_model(description = description, tags = tags)\n", "model = local_run.register_model(description = description, tags = tags)\n",
"local_run.model_id # This will be written to the script file later in the notebook." "\n",
"print(local_run.model_id) # This will be written to the script file later in the notebook."
] ]
}, },
{ {

View File

@@ -61,11 +61,8 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import logging\n", "import logging\n",
"import os\n",
"import random\n",
"\n", "\n",
"from matplotlib import pyplot as plt\n", "from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import numpy as np\n", "import numpy as np\n",
"import pandas as pd\n", "import pandas as pd\n",
"from sklearn import datasets\n", "from sklearn import datasets\n",
@@ -73,8 +70,7 @@
"import azureml.core\n", "import azureml.core\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig"
"from azureml.train.automl.run import AutoMLRun"
] ]
}, },
{ {
@@ -100,7 +96,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n", "output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data = output, index = ['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {
@@ -135,8 +132,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from sklearn import datasets\n",
"\n",
"digits = datasets.load_digits()\n", "digits = datasets.load_digits()\n",
"\n", "\n",
"# Exclude the first 100 rows from training so that they can be used for test.\n", "# Exclude the first 100 rows from training so that they can be used for test.\n",

View File

@@ -60,11 +60,8 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import logging\n", "import logging\n",
"import os\n",
"import random\n",
"\n", "\n",
"from matplotlib import pyplot as plt\n", "from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import numpy as np\n", "import numpy as np\n",
"import pandas as pd\n", "import pandas as pd\n",
"from sklearn import datasets\n", "from sklearn import datasets\n",
@@ -72,8 +69,7 @@
"import azureml.core\n", "import azureml.core\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig"
"from azureml.train.automl.run import AutoMLRun"
] ]
}, },
{ {
@@ -99,7 +95,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n", "output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data = output, index = ['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {
@@ -134,8 +131,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from sklearn import datasets\n",
"\n",
"digits = datasets.load_digits()\n", "digits = datasets.load_digits()\n",
"\n", "\n",
"# Exclude the first 100 rows from training so that they can be used for test.\n", "# Exclude the first 100 rows from training so that they can be used for test.\n",

View File

@@ -1,154 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning Configuration\n",
"\n",
"In this example you will create an Azure Machine Learning `Workspace` object and initialize your notebook directory to easily reload this object from a configuration file. Typically you will only need to run this once per notebook directory, and all other notebooks in this directory or any sub-directories will automatically use the settings you indicate here.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Check the Azure ML Core SDK Version to Validate Your Installation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"\n",
"print(\"SDK Version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize an Azure ML Workspace\n",
"### What is an Azure ML Workspace and Why Do I Need One?\n",
"\n",
"An Azure ML workspace is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, operationalization, and the monitoring of operationalized models.\n",
"\n",
"\n",
"### What do I Need?\n",
"\n",
"To create or access an Azure ML workspace, you will need to import the Azure ML library and specify following information:\n",
"* A name for your workspace. You can choose one.\n",
"* Your subscription id. Use the `id` value from the `az account show` command output above.\n",
"* The resource group name. The resource group organizes Azure resources and provides a default region for the resources in the group. The resource group will be created if it doesn't exist. Resource groups can be created and viewed in the [Azure portal](https://portal.azure.com)\n",
"* Supported regions include `eastus2`, `eastus`,`westcentralus`, `southeastasia`, `westeurope`, `australiaeast`, `westus2`, `southcentralus`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"subscription_id = \"<subscription_id>\"\n",
"resource_group = \"myrg\"\n",
"workspace_name = \"myws\"\n",
"workspace_region = \"eastus2\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Creating a Workspace\n",
"If you already have access to an Azure ML workspace you want to use, you can skip this cell. Otherwise, this cell will create an Azure ML workspace for you in the specified subscription, provided you have the correct permissions for the given `subscription_id`.\n",
"\n",
"This will fail when:\n",
"1. The workspace already exists.\n",
"2. You do not have permission to create a workspace in the resource group.\n",
"3. You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription.\n",
"\n",
"If workspace creation fails for any reason other than already existing, please work with your IT administrator to provide you with the appropriate permissions or to provision the required resources.\n",
"\n",
"**Note:** Creation of a new workspace can take several minutes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Import the Workspace class and check the Azure ML SDK version.\n",
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.create(name = workspace_name,\n",
" subscription_id = subscription_id,\n",
" resource_group = resource_group, \n",
" location = workspace_region)\n",
"ws.get_details()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Configuring Your Local Environment\n",
"You can validate that you have access to the specified workspace and write a configuration file to the default configuration location, `./aml_config/config.json`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace(workspace_name = workspace_name,\n",
" subscription_id = subscription_id,\n",
" resource_group = resource_group)\n",
"\n",
"# Persist the subscription id, resource group name, and workspace name in aml_config/config.json.\n",
"ws.write_config()"
]
}
],
"metadata": {
"authors": [
{
"name": "savitam"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -80,7 +80,6 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import logging\n", "import logging\n",
"import os\n",
"import time\n", "import time\n",
"\n", "\n",
"import pandas as pd\n", "import pandas as pd\n",
@@ -117,7 +116,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n", "output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data = output, index = ['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {
@@ -323,7 +323,6 @@
" metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}\n", " metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}\n",
" metricslist[int(properties['iteration'])] = metrics\n", " metricslist[int(properties['iteration'])] = metrics\n",
" \n", " \n",
"import pandas as pd\n",
"rundata = pd.DataFrame(metricslist).sort_index(1)\n", "rundata = pd.DataFrame(metricslist).sort_index(1)\n",
"rundata" "rundata"
] ]
@@ -427,8 +426,6 @@
"source": [ "source": [
"#Randomly select digits and test\n", "#Randomly select digits and test\n",
"from matplotlib import pyplot as plt\n", "from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import random\n",
"import numpy as np\n", "import numpy as np\n",
"\n", "\n",
"for index in np.random.choice(len(y_test), 2, replace = False):\n", "for index in np.random.choice(len(y_test), 2, replace = False):\n",
@@ -482,7 +479,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"digits_complete.to_pandas_dataframe().shape\n", "print(digits_complete.to_pandas_dataframe().shape)\n",
"labels_column = 'Column64'\n", "labels_column = 'Column64'\n",
"dflow_X = digits_complete.drop_columns(columns = [labels_column])\n", "dflow_X = digits_complete.drop_columns(columns = [labels_column])\n",
"dflow_y = digits_complete.keep_columns(columns = [labels_column])" "dflow_y = digits_complete.keep_columns(columns = [labels_column])"

View File

@@ -80,7 +80,6 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import logging\n", "import logging\n",
"import os\n",
"\n", "\n",
"import pandas as pd\n", "import pandas as pd\n",
"\n", "\n",
@@ -115,7 +114,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n", "output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data = output, index = ['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {
@@ -274,7 +274,6 @@
" metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}\n", " metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}\n",
" metricslist[int(properties['iteration'])] = metrics\n", " metricslist[int(properties['iteration'])] = metrics\n",
" \n", " \n",
"import pandas as pd\n",
"rundata = pd.DataFrame(metricslist).sort_index(1)\n", "rundata = pd.DataFrame(metricslist).sort_index(1)\n",
"rundata" "rundata"
] ]
@@ -378,8 +377,6 @@
"source": [ "source": [
"#Randomly select digits and test\n", "#Randomly select digits and test\n",
"from matplotlib import pyplot as plt\n", "from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import random\n",
"import numpy as np\n", "import numpy as np\n",
"\n", "\n",
"for index in np.random.choice(len(y_test), 2, replace = False):\n", "for index in np.random.choice(len(y_test), 2, replace = False):\n",
@@ -433,7 +430,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"digits_complete.to_pandas_dataframe().shape\n", "print(digits_complete.to_pandas_dataframe().shape)\n",
"labels_column = 'Column64'\n", "labels_column = 'Column64'\n",
"dflow_X = digits_complete.drop_columns(columns = [labels_column])\n", "dflow_X = digits_complete.drop_columns(columns = [labels_column])\n",
"dflow_y = digits_complete.keep_columns(columns = [labels_column])" "dflow_y = digits_complete.keep_columns(columns = [labels_column])"

View File

@@ -53,22 +53,11 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import logging\n",
"import os\n",
"import random\n",
"import re\n",
"\n",
"from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import numpy as np\n",
"import pandas as pd\n", "import pandas as pd\n",
"from sklearn import datasets\n", "import json\n",
"\n", "\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.core.run import Run\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n",
"from azureml.train.automl.run import AutoMLRun" "from azureml.train.automl.run import AutoMLRun"
] ]
}, },
@@ -152,7 +141,7 @@
"for run in automl_runs:\n", "for run in automl_runs:\n",
" properties = run.get_properties()\n", " properties = run.get_properties()\n",
" tags = run.get_tags()\n", " tags = run.get_tags()\n",
" amlsettings = eval(properties['RawAMLSettingsString'])\n", " amlsettings = json.loads(properties['AMLSettingsJsonString'])\n",
" if 'iterations' in tags:\n", " if 'iterations' in tags:\n",
" iterations = tags['iterations']\n", " iterations = tags['iterations']\n",
" else:\n", " else:\n",
@@ -196,7 +185,7 @@
"properties = ml_run.get_properties()\n", "properties = ml_run.get_properties()\n",
"tags = ml_run.get_tags()\n", "tags = ml_run.get_tags()\n",
"status = ml_run.get_details()\n", "status = ml_run.get_details()\n",
"amlsettings = eval(properties['RawAMLSettingsString'])\n", "amlsettings = json.loads(properties['AMLSettingsJsonString'])\n",
"if 'iterations' in tags:\n", "if 'iterations' in tags:\n",
" iterations = tags['iterations']\n", " iterations = tags['iterations']\n",
"else:\n", "else:\n",
@@ -297,7 +286,7 @@
"description = 'AutoML Model'\n", "description = 'AutoML Model'\n",
"tags = None\n", "tags = None\n",
"ml_run.register_model(description = description, tags = tags)\n", "ml_run.register_model(description = description, tags = tags)\n",
"ml_run.model_id # Use this id to deploy the model as a web service in Azure." "print(ml_run.model_id) # Use this id to deploy the model as a web service in Azure."
] ]
}, },
{ {

View File

@@ -44,9 +44,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Setup\n", "## Setup\n"
"\n",
"As part of the setup you have already created a <b>Workspace</b>. For AutoML you would need to create an <b>Experiment</b>. An <b>Experiment</b> is a named object in a <b>Workspace</b>, which is used to run experiments."
] ]
}, },
{ {
@@ -58,7 +56,6 @@
"import azureml.core\n", "import azureml.core\n",
"import pandas as pd\n", "import pandas as pd\n",
"import numpy as np\n", "import numpy as np\n",
"import os\n",
"import logging\n", "import logging\n",
"import warnings\n", "import warnings\n",
"# Squash warning messages for cleaner output in the notebook\n", "# Squash warning messages for cleaner output in the notebook\n",
@@ -68,12 +65,17 @@
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig\n",
"from azureml.train.automl.run import AutoMLRun\n",
"from matplotlib import pyplot as plt\n", "from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score" "from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created a <b>Workspace</b>. For AutoML you would need to create an <b>Experiment</b>. An <b>Experiment</b> is a named object in a <b>Workspace</b>, which is used to run experiments."
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
@@ -98,7 +100,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Run History Name'] = experiment_name\n", "output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data=output, index=['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {
@@ -289,61 +292,6 @@
"y_pred" "y_pred"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Define a Check Data Function\n",
"\n",
"Remove the nan values from y_test to avoid error when calculate metrics "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def _check_calc_input(y_true, y_pred, rm_na=True):\n",
" \"\"\"\n",
" Check that 'y_true' and 'y_pred' are non-empty and\n",
" have equal length.\n",
"\n",
" :param y_true: Vector of actual values\n",
" :type y_true: array-like\n",
"\n",
" :param y_pred: Vector of predicted values\n",
" :type y_pred: array-like\n",
"\n",
" :param rm_na:\n",
" If rm_na=True, remove entries where y_true=NA and y_pred=NA.\n",
" :type rm_na: boolean\n",
"\n",
" :return:\n",
" Tuple (y_true, y_pred). if rm_na=True,\n",
" the returned vectors may differ from their input values.\n",
" :rtype: Tuple with 2 entries\n",
" \"\"\"\n",
" if len(y_true) != len(y_pred):\n",
" raise ValueError(\n",
" 'the true values and prediction values do not have equal length.')\n",
" elif len(y_true) == 0:\n",
" raise ValueError(\n",
" 'y_true and y_pred are empty.')\n",
" # if there is any non-numeric element in the y_true or y_pred,\n",
" # the ValueError exception will be thrown.\n",
" y_true = np.array(y_true).astype(float)\n",
" y_pred = np.array(y_pred).astype(float)\n",
" if rm_na:\n",
" # remove entries both in y_true and y_pred where at least\n",
" # one element in y_true or y_pred is missing\n",
" y_true_rm_na = y_true[~(np.isnan(y_true) | np.isnan(y_pred))]\n",
" y_pred_rm_na = y_pred[~(np.isnan(y_true) | np.isnan(y_pred))]\n",
" return (y_true_rm_na, y_pred_rm_na)\n",
" else:\n",
" return y_true, y_pred"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -357,7 +305,22 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"y_test,y_pred = _check_calc_input(y_test,y_pred)" "if len(y_test) != len(y_pred):\n",
" raise ValueError(\n",
" 'the true values and prediction values do not have equal length.')\n",
"elif len(y_test) == 0:\n",
" raise ValueError(\n",
" 'y_true and y_pred are empty.')\n",
"\n",
"# if there is any non-numeric element in the y_true or y_pred,\n",
"# the ValueError exception will be thrown.\n",
"y_test_f = np.array(y_test).astype(float)\n",
"y_pred_f = np.array(y_pred).astype(float)\n",
"\n",
"# remove entries both in y_true and y_pred where at least\n",
"# one element in y_true or y_pred is missing\n",
"y_test = y_test_f[~(np.isnan(y_test_f) | np.isnan(y_pred_f))]\n",
"y_pred = y_pred_f[~(np.isnan(y_test_f) | np.isnan(y_pred_f))]"
] ]
}, },
{ {
@@ -410,7 +373,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.6.6" "version": "3.6.8"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@@ -38,16 +38,14 @@
"3. Find and train a forecasting model using local compute\n", "3. Find and train a forecasting model using local compute\n",
"4. Evaluate the performance of the model\n", "4. Evaluate the performance of the model\n",
"\n", "\n",
"The examples in the follow code samples use the [University of Chicago's Dominick's Finer Foods dataset](https://research.chicagobooth.edu/kilts/marketing-databases/dominicks) to forecast orange juice sales. Dominick's was a grocery chain in the Chicago metropolitan area." "The examples in the follow code samples use the University of Chicago's Dominick's Finer Foods dataset to forecast orange juice sales. Dominick's was a grocery chain in the Chicago metropolitan area."
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Setup\n", "## Setup"
"\n",
"As part of the setup you have already created a <b>Workspace</b>. To run AutoML, you also need to create an <b>Experiment</b>. An Experiment is a named object in a Workspace which represents a predictive task, the output of which is a trained model and a set of evaluation metrics for the model. "
] ]
}, },
{ {
@@ -59,7 +57,6 @@
"import azureml.core\n", "import azureml.core\n",
"import pandas as pd\n", "import pandas as pd\n",
"import numpy as np\n", "import numpy as np\n",
"import os\n",
"import logging\n", "import logging\n",
"import warnings\n", "import warnings\n",
"# Squash warning messages for cleaner output in the notebook\n", "# Squash warning messages for cleaner output in the notebook\n",
@@ -69,10 +66,16 @@
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig\n",
"from azureml.train.automl.run import AutoMLRun\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error" "from sklearn.metrics import mean_absolute_error, mean_squared_error"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created a <b>Workspace</b>. To run AutoML, you also need to create an <b>Experiment</b>. An Experiment is a named object in a Workspace which represents a predictive task, the output of which is a trained model and a set of evaluation metrics for the model. "
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
@@ -97,7 +100,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Run History Name'] = experiment_name\n", "output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data=output, index=['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {
@@ -405,7 +409,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.6.6" "version": "3.6.8"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@@ -63,11 +63,8 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import logging\n", "import logging\n",
"import os\n",
"import random\n",
"\n", "\n",
"from matplotlib import pyplot as plt\n", "from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import numpy as np\n", "import numpy as np\n",
"import pandas as pd\n", "import pandas as pd\n",
"from sklearn import datasets\n", "from sklearn import datasets\n",
@@ -75,8 +72,7 @@
"import azureml.core\n", "import azureml.core\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig"
"from azureml.train.automl.run import AutoMLRun"
] ]
}, },
{ {
@@ -102,7 +98,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n", "output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data=output, index=['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {
@@ -135,8 +132,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from scipy import sparse\n",
"\n",
"digits = datasets.load_digits()\n", "digits = datasets.load_digits()\n",
"X_train = digits.data[10:,:]\n", "X_train = digits.data[10:,:]\n",
"y_train = digits.target[10:]\n", "y_train = digits.target[10:]\n",

View File

@@ -57,15 +57,12 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import logging\n", "import logging\n",
"import os\n",
"import random\n",
"\n", "\n",
"import pandas as pd\n", "import pandas as pd\n",
"import azureml.core\n", "import azureml.core\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig"
"from azureml.train.automl.run import AutoMLRun"
] ]
}, },
{ {
@@ -92,7 +89,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n", "output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data = output, index = ['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {

View File

@@ -58,20 +58,15 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import logging\n", "import logging\n",
"import os\n",
"import random\n",
"\n", "\n",
"from matplotlib import pyplot as plt\n", "from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import numpy as np\n", "import numpy as np\n",
"import pandas as pd\n", "import pandas as pd\n",
"from sklearn import datasets\n",
"\n", "\n",
"import azureml.core\n", "import azureml.core\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig"
"from azureml.train.automl.run import AutoMLRun"
] ]
}, },
{ {
@@ -97,7 +92,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n", "output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data = output, index = ['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {
@@ -354,9 +350,6 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"%matplotlib inline\n", "%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"from sklearn import datasets\n",
"from sklearn.metrics import mean_squared_error, r2_score\n", "from sklearn.metrics import mean_squared_error, r2_score\n",
"\n", "\n",
"# Set up a multi-plot chart.\n", "# Set up a multi-plot chart.\n",
@@ -375,8 +368,8 @@
"a0.set_ylabel('Residual Values', fontsize = 12)\n", "a0.set_ylabel('Residual Values', fontsize = 12)\n",
"\n", "\n",
"# Plot a histogram.\n", "# Plot a histogram.\n",
"a0.hist(y_residual_train, orientation = 'horizontal', color = 'b', bins = 10, histtype = 'step');\n", "a0.hist(y_residual_train, orientation = 'horizontal', color = 'b', bins = 10, histtype = 'step')\n",
"a0.hist(y_residual_train, orientation = 'horizontal', color = 'b', alpha = 0.2, bins = 10);\n", "a0.hist(y_residual_train, orientation = 'horizontal', color = 'b', alpha = 0.2, bins = 10)\n",
"\n", "\n",
"# Plot residual values of test set.\n", "# Plot residual values of test set.\n",
"a1.axis([0, 90, -200, 200])\n", "a1.axis([0, 90, -200, 200])\n",

View File

@@ -66,21 +66,15 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import logging\n",
"import os\n", "import os\n",
"import random\n",
"\n", "\n",
"from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import numpy as np\n", "import numpy as np\n",
"import pandas as pd\n", "import pandas as pd\n",
"from sklearn import datasets\n",
"\n", "\n",
"import azureml.core\n", "import azureml.core\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig"
"from azureml.train.automl.run import AutoMLRun"
] ]
}, },
{ {
@@ -106,7 +100,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n", "output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data=output, index=['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {

View File

@@ -67,10 +67,8 @@
"source": [ "source": [
"import logging\n", "import logging\n",
"import os\n", "import os\n",
"import random\n",
"\n", "\n",
"from matplotlib import pyplot as plt\n", "from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import numpy as np\n", "import numpy as np\n",
"import pandas as pd\n", "import pandas as pd\n",
"from sklearn import datasets\n", "from sklearn import datasets\n",
@@ -78,8 +76,7 @@
"import azureml.core\n", "import azureml.core\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig"
"from azureml.train.automl.run import AutoMLRun"
] ]
}, },
{ {
@@ -105,7 +102,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n", "output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data = output, index = ['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {
@@ -170,7 +168,7 @@
" # If no min_node_count is provided, it will use the scale settings for the cluster.\n", " # If no min_node_count is provided, it will use the scale settings for the cluster.\n",
" compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)\n", " compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)\n",
" \n", " \n",
" # For a more detailed view of current AmlCompute status, use the 'status' property." " # For a more detailed view of current AmlCompute status, use get_status()."
] ]
}, },
{ {

View File

@@ -59,21 +59,16 @@
"source": [ "source": [
"import logging\n", "import logging\n",
"import os\n", "import os\n",
"import random\n",
"import time\n", "import time\n",
"\n", "\n",
"from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import numpy as np\n", "import numpy as np\n",
"import pandas as pd\n", "import pandas as pd\n",
"from sklearn import datasets\n",
"\n", "\n",
"import azureml.core\n", "import azureml.core\n",
"from azureml.core.compute import DsvmCompute\n", "from azureml.core.compute import DsvmCompute\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig"
"from azureml.train.automl.run import AutoMLRun"
] ]
}, },
{ {
@@ -100,7 +95,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n", "output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data=output, index=['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {
@@ -169,7 +165,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"mkdir data" "if not os.path.isdir('data'):\n",
" os.mkdir('data') "
] ]
}, },
{ {
@@ -218,7 +215,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core import Workspace, Datastore\n",
"#blob_datastore = Datastore(ws, blob_datastore_name)\n", "#blob_datastore = Datastore(ws, blob_datastore_name)\n",
"ds = ws.get_default_datastore()\n", "ds = ws.get_default_datastore()\n",
"print(ds.datastore_type, ds.account_name, ds.container_name)" "print(ds.datastore_type, ds.account_name, ds.container_name)"

View File

@@ -67,11 +67,9 @@
"source": [ "source": [
"import logging\n", "import logging\n",
"import os\n", "import os\n",
"import random\n",
"import time\n", "import time\n",
"\n", "\n",
"from matplotlib import pyplot as plt\n", "from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import numpy as np\n", "import numpy as np\n",
"import pandas as pd\n", "import pandas as pd\n",
"from sklearn import datasets\n", "from sklearn import datasets\n",
@@ -79,8 +77,7 @@
"import azureml.core\n", "import azureml.core\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig"
"from azureml.train.automl.run import AutoMLRun"
] ]
}, },
{ {
@@ -106,7 +103,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n", "output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data = output, index = ['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {
@@ -152,7 +150,7 @@
" dsvm_compute = DsvmCompute.create(ws, name = dsvm_name, provisioning_configuration = dsvm_config)\n", " dsvm_compute = DsvmCompute.create(ws, name = dsvm_name, provisioning_configuration = dsvm_config)\n",
" dsvm_compute.wait_for_completion(show_output = True)\n", " dsvm_compute.wait_for_completion(show_output = True)\n",
" print(\"Waiting one minute for ssh to be accessible\")\n", " print(\"Waiting one minute for ssh to be accessible\")\n",
" time.sleep(60) # Wait for ssh to be accessible" " time.sleep(90) # Wait for ssh to be accessible"
] ]
}, },
{ {

View File

@@ -51,11 +51,8 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import logging\n", "import logging\n",
"import os\n",
"import random\n",
"\n", "\n",
"from matplotlib import pyplot as plt\n", "from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import numpy as np\n", "import numpy as np\n",
"import pandas as pd\n", "import pandas as pd\n",
"from sklearn import datasets\n", "from sklearn import datasets\n",
@@ -63,8 +60,7 @@
"import azureml.core\n", "import azureml.core\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig"
"from azureml.train.automl.run import AutoMLRun"
] ]
}, },
{ {
@@ -93,7 +89,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n", "output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data = output, index = ['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {

View File

@@ -61,20 +61,13 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import logging\n", "import logging\n",
"import os\n",
"import random\n",
"\n", "\n",
"from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import numpy as np\n",
"import pandas as pd\n", "import pandas as pd\n",
"from sklearn import datasets\n",
"\n", "\n",
"import azureml.core\n", "import azureml.core\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig"
"from azureml.train.automl.run import AutoMLRun"
] ]
}, },
{ {
@@ -101,7 +94,8 @@
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n", "output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data=output, index=['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {

View File

@@ -0,0 +1,218 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Classification with Local Compute**_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Data](#Data)\n",
"1. [Train](#Train)\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"\n",
"In this example we will explore AutoML's subsampling feature. This is useful for training on large datasets to speed up the convergence.\n",
"\n",
"The setup is quiet similar to a normal classification, with the exception of the `enable_subsampling` option. Keep in mind that even with the `enable_subsampling` flag set, subsampling will only be run for large datasets (>= 50k rows) and large (>= 85) or no iteration restrictions.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n",
"from azureml.train.automl.run import AutoMLRun"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# Choose a name for the experiment and specify the project folder.\n",
"experiment_name = 'automl-subsampling'\n",
"project_folder = './sample_projects/automl-subsampling'\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace Name'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data = output, index = ['']).T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Opt-in diagnostics for better experience, quality, and security of future releases."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.telemetry import set_diagnostics_collection\n",
"set_diagnostics_collection(send_diagnostics = True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data\n",
"\n",
"We will create a simple dataset using the numpy sin function just for this example. We need just over 50k rows."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"base = np.arange(60000)\n",
"cos = np.cos(base)\n",
"y = np.round(np.sin(base)).astype('int')\n",
"\n",
"# Exclude the first 100 rows from training so that they can be used for test.\n",
"X_train = np.hstack((base.reshape(-1, 1), cos.reshape(-1, 1)))\n",
"y_train = y"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**enable_subsampling**|This enables subsampling as an option. However it does not guarantee subsampling will be used. It also depends on how large the dataset is and how many iterations it's expected to run at a minimum.|\n",
"|**iterations**|Number of iterations. Subsampling requires a lot of iterations at smaller percent so in order for subsampling to be used we need to set iterations to be a high number.|\n",
"|**experiment_timeout_minutes**|The experiment timeout, it's set to 5 right now to shorten the demo but it should probably be higher if we want to finish all the iterations.|\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_config = AutoMLConfig(task = 'classification',\n",
" debug_log = 'automl_errors.log',\n",
" primary_metric = 'accuracy',\n",
" iterations = 85,\n",
" experiment_timeout_minutes = 5,\n",
" n_cross_validations = 2,\n",
" verbosity = logging.INFO,\n",
" X = X_train, \n",
" y = y_train,\n",
" enable_subsampling=True,\n",
" path = project_folder)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.\n",
"In this example, we specify `show_output = True` to print currently running iterations to the console."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run = experiment.submit(automl_config, show_output = True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"authors": [
{
"name": "rogehe"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,70 +1,26 @@
Azure Databricks is a managed Spark offering on Azure and customers already use it for advanced analytics. It provides a collaborative Notebook based environment with CPU or GPU based compute cluster. Azure Databricks is a managed Spark offering on Azure and customers already use it for advanced analytics. It provides a collaborative Notebook based environment with CPU or GPU based compute cluster.
In this section, you will see sample notebooks on how to use Azure Machine Learning SDK with Azure Databricks. You can train a model using Spark MLlib and then deploy the model to ACI/AKS from within Azure Databricks. You can also use Automated ML capability (**public preview**) of Azure ML SDK with Azure Databricks. In this section, you will find sample notebooks on how to use Azure Machine Learning SDK with Azure Databricks. You can train a model using Spark MLlib and then deploy the model to ACI/AKS from within Azure Databricks. You can also use Automated ML capability (**public preview**) of Azure ML SDK with Azure Databricks.
- Customers who use Azure Databricks for advanced analytics can now use the same cluster to run experiments with or without automated machine learning. - Customers who use Azure Databricks for advanced analytics can now use the same cluster to run experiments with or without automated machine learning.
- You can keep the data within the same cluster. - You can keep the data within the same cluster.
- You can leverage the local worker nodes with autoscale and auto termination capabilities. - You can leverage the local worker nodes with autoscale and auto termination capabilities.
- You can use multiple cores of your Azure Databricks cluster to perform simultaneous training. - You can use multiple cores of your Azure Databricks cluster to perform simultaneous training.
- You can further tune the model generated by automated machine learning if you choose to. - You can further tune the model generated by automated machine learning if you choose to.
- Every run (including the best run) is available as a pipeline. - Every run (including the best run) is available as a pipeline, which you can tune further if needed.
- The model trained using Azure Databricks can be registered in Azure ML SDK workspace and then deployed to Azure managed compute (ACI or AKS) using the Azure Machine learning SDK. - The model trained using Azure Databricks can be registered in Azure ML SDK workspace and then deployed to Azure managed compute (ACI or AKS) using the Azure Machine learning SDK.
**Create Azure Databricks Cluster:** Please follow our [Azure doc](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#azure-databricks) to install the sdk in your Azure Databricks cluster before trying any of the sample notebooks.
Select New Cluster and fill in following detail: **Single file** -
- Cluster name: _yourclustername_ The following archive contains all the sample notebooks. You can then run the notebooks after importing the [DBC](Databricks_AMLSDK_1-4_6.dbc) in your Databricks workspace instead of downloading them individually.
- Databricks Runtime: Any 4.x runtime.
- Python version: **3**
- Workers: 2 or higher.
These settings are only for using Automated Machine Learning on Databricks. Notebooks 1-4 have to be run sequentially & are related to an income prediction experiment based on this [dataset](https://archive.ics.uci.edu/ml/datasets/adult) and demonstrate how to prepare data, train, and operationalize a Spark ML model with the Azure ML Python SDK from within Azure Databricks.
- Max. number of **concurrent iterations** in Automated ML settings is **<=** to the number of **worker nodes** in your Databricks cluster.
- Worker node VM types: **Memory optimized VM** preferred.
- Uncheck _Enable Autoscaling_
Notebook 6 is an Automated ML sample notebook for Classification.
It will take few minutes to create the cluster. Please ensure that the cluster state is running before proceeding further.
**Install Azure ML SDK without Automated ML capability on your Azure Databricks cluster**
- Select Import library
- Source: Upload Python Egg or PyPI
- PyPi Name: **azureml-sdk[databricks]**
**Install Azure ML with Automated ML SDK on your Azure Databricks cluster**
- Select Import library
- Source: Upload Python Egg or PyPI
- PyPi Name: **azureml-sdk[automl_databricks]**
**For installation with or without Automated ML**
- Click Install Library
- Do not select _Attach automatically to all clusters_. In case you have selected earlier then you can go to your Home folder and deselect it.
- Select the check box _Attach_ next to your cluster name
(More details on how to attach and detach libs are here - [https://docs.databricks.com/user-guide/libraries.html#attach-a-library-to-a-cluster](https://docs.databricks.com/user-guide/libraries.html#attach-a-library-to-a-cluster) )
- Ensure that there are no errors until Status changes to _Attached_. It may take a couple of minutes.
**Note** - If you have the old build the please deselect it from clusters installed libs > move to trash. Install the new build and restart the cluster. And if still there is an issue then detach and reattach your cluster.
iPython Notebooks 1-4 have to be run sequentially after making changes based on your subscription. The corresponding DBC archive contains all the notebooks and can be imported into your Databricks workspace. You can then run the notebooks after importing [databricks_amlsdk](Databricks_AMLSDK_1-4_6.dbc) instead of downloading them individually.
Notebooks 1-4 are related to Income prediction experiment based on this [dataset](https://archive.ics.uci.edu/ml/datasets/adult) and demonstrate how to data prep, train and operationalize a Spark ML model with Azure ML Python SDK from within Azure Databricks. Notebook 6 is an Automated ML sample notebook.
For details on SDK concepts, please refer to [notebooks](https://github.com/Azure/MachineLearningNotebooks).
Learn more about [how to use Azure Databricks as a development environment](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment#azure-databricks) for Azure Machine Learning service. Learn more about [how to use Azure Databricks as a development environment](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment#azure-databricks) for Azure Machine Learning service.
You can also use Azure Databricks as a compute target for [training models with an Azure Machine Learning pipeline](https://docs.microsoft.com/machine-learning/service/how-to-set-up-training-targets#databricks). For more on SDK concepts, please refer to [notebooks](https://github.com/Azure/MachineLearningNotebooks).
**Please let us know your feedback.** **Please let us know your feedback.**

View File

@@ -54,21 +54,6 @@
"print(\"SDK version:\", azureml.core.VERSION)" "print(\"SDK version:\", azureml.core.VERSION)"
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##TESTONLY\n",
"# import auth creds from notebook parameters\n",
"tenant = dbutils.widgets.get('tenant_id')\n",
"username = dbutils.widgets.get('service_principal_id')\n",
"password = dbutils.widgets.get('service_principal_password')\n",
"\n",
"auth = azureml.core.authentication.ServicePrincipalAuthentication(tenant, username, password)"
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
@@ -91,15 +76,14 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"##PUBLISHONLY\n", "# import the Workspace class and check the azureml SDK version\n",
"## import the Workspace class and check the azureml SDK version\n", "from azureml.core import Workspace\n",
"#from azureml.core import Workspace\n", "\n",
"#\n", "ws = Workspace.from_config()\n",
"#ws = Workspace.from_config()\n", "print('Workspace name: ' + ws.name, \n",
"#print('Workspace name: ' + ws.name, \n", " 'Azure region: ' + ws.location, \n",
"# 'Azure region: ' + ws.location, \n", " 'Subscription id: ' + ws.subscription_id, \n",
"# 'Subscription id: ' + ws.subscription_id, \n", " 'Resource group: ' + ws.resource_group, sep = '\\n')"
"# 'Resource group: ' + ws.resource_group, sep = '\\n')"
] ]
}, },
{ {
@@ -372,9 +356,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@@ -39,21 +39,6 @@
"print(\"SDK version:\", azureml.core.VERSION)" "print(\"SDK version:\", azureml.core.VERSION)"
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##TESTONLY\n",
"# import auth creds from notebook parameters\n",
"tenant = dbutils.widgets.get('tenant_id')\n",
"username = dbutils.widgets.get('service_principal_id')\n",
"password = dbutils.widgets.get('service_principal_password')\n",
"\n",
"auth = azureml.core.authentication.ServicePrincipalAuthentication(tenant, username, password)"
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
@@ -77,20 +62,19 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"##PUBLISHONLY\n", "from azureml.core import Workspace\n",
"#from azureml.core import Workspace\n", "import azureml.core\n",
"#import azureml.core\n", "\n",
"#\n", "# Check core SDK version number\n",
"## Check core SDK version number\n", "print(\"SDK version:\", azureml.core.VERSION)\n",
"#print(\"SDK version:\", azureml.core.VERSION)\n", "\n",
"#\n", "#'''\n",
"##'''\n", "ws = Workspace.from_config()\n",
"#ws = Workspace.from_config()\n", "print('Workspace name: ' + ws.name, \n",
"#print('Workspace name: ' + ws.name, \n", " 'Azure region: ' + ws.location, \n",
"# 'Azure region: ' + ws.location, \n", " 'Subscription id: ' + ws.subscription_id, \n",
"# 'Subscription id: ' + ws.subscription_id, \n", " 'Resource group: ' + ws.resource_group, sep = '\\n')\n",
"# 'Resource group: ' + ws.resource_group, sep = '\\n')\n", "#'''"
"##'''"
] ]
}, },
{ {
@@ -330,9 +314,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@@ -158,9 +158,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@@ -73,35 +73,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"##TESTONLY\n",
"# import auth creds from notebook parameters\n",
"tenant = dbutils.widgets.get('tenant_id')\n",
"username = dbutils.widgets.get('service_principal_id')\n",
"password = dbutils.widgets.get('service_principal_password')\n",
"\n",
"auth = azureml.core.authentication.ServicePrincipalAuthentication(tenant, username, password)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##TESTONLY\n",
"subscription_id = dbutils.widgets.get('subscription_id')\n",
"resource_group = dbutils.widgets.get('resource_group')\n",
"workspace_name = dbutils.widgets.get('workspace_name')\n",
"workspace_region = dbutils.widgets.get('workspace_region')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##TESTONLY\n",
"# import the Workspace class and check the azureml SDK version\n", "# import the Workspace class and check the azureml SDK version\n",
"# exist_ok checks if workspace exists or not.\n", "# exist_ok checks if workspace exists or not.\n",
"\n", "\n",
@@ -111,29 +82,9 @@
" subscription_id = subscription_id,\n", " subscription_id = subscription_id,\n",
" resource_group = resource_group, \n", " resource_group = resource_group, \n",
" location = workspace_region,\n", " location = workspace_region,\n",
" auth = auth,\n",
" exist_ok=True)" " exist_ok=True)"
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##PUBLISHONLY\n",
"## import the Workspace class and check the azureml SDK version\n",
"## exist_ok checks if workspace exists or not.\n",
"#\n",
"#from azureml.core import Workspace\n",
"#\n",
"#ws = Workspace.create(name = workspace_name,\n",
"# subscription_id = subscription_id,\n",
"# resource_group = resource_group, \n",
"# location = workspace_region,\n",
"# exist_ok=True)"
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
@@ -150,31 +101,14 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"##TESTONLY\n",
"ws = Workspace(workspace_name = workspace_name,\n", "ws = Workspace(workspace_name = workspace_name,\n",
" subscription_id = subscription_id,\n", " subscription_id = subscription_id,\n",
" resource_group = resource_group,\n", " resource_group = resource_group)\n",
" auth = auth)\n",
"\n", "\n",
"# persist the subscription id, resource group name, and workspace name in aml_config/config.json.\n", "# persist the subscription id, resource group name, and workspace name in aml_config/config.json.\n",
"ws.write_config()" "ws.write_config()\n",
] "##if you need to give a different path/filename please use this\n",
}, "##write_config(path=\"/databricks/driver/aml_config/\",file_name=<alias_conf.cfg>)"
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##PUBLISHONLY\n",
"#ws = Workspace(workspace_name = workspace_name,\n",
"# subscription_id = subscription_id,\n",
"# resource_group = resource_group)\n",
"#\n",
"## persist the subscription id, resource group name, and workspace name in aml_config/config.json.\n",
"#ws.write_config()\n",
"###if you need to give a different path/filename please use this\n",
"###write_config(path=\"/databricks/driver/aml_config/\",file_name=<alias_conf.cfg>)"
] ]
}, },
{ {
@@ -192,11 +126,10 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"##TESTONLY\n",
"# import the Workspace class and check the azureml SDK version\n", "# import the Workspace class and check the azureml SDK version\n",
"from azureml.core import Workspace\n", "from azureml.core import Workspace\n",
"\n", "\n",
"ws = Workspace.from_config(auth = auth)\n", "ws = Workspace.from_config()\n",
"#ws = Workspace.from_config(<full path>)\n", "#ws = Workspace.from_config(<full path>)\n",
"print('Workspace name: ' + ws.name, \n", "print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n", " 'Azure region: ' + ws.location, \n",
@@ -204,24 +137,6 @@
" 'Resource group: ' + ws.resource_group, sep = '\\n')" " 'Resource group: ' + ws.resource_group, sep = '\\n')"
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##PUBLISHONLY\n",
"## import the Workspace class and check the azureml SDK version\n",
"#from azureml.core import Workspace\n",
"#\n",
"#ws = Workspace.from_config()\n",
"##ws = Workspace.from_config(<full path>)\n",
"#print('Workspace name: ' + ws.name, \n",
"# 'Azure region: ' + ws.location, \n",
"# 'Subscription id: ' + ws.subscription_id, \n",
"# 'Resource group: ' + ws.resource_group, sep = '\\n')"
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
@@ -240,9 +155,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@@ -13,45 +13,31 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"We support installing AML SDK as library from GUI. When attaching a library follow this https://docs.databricks.com/user-guide/libraries.html and add the below string as your PyPi package. You can select the option to attach the library to all clusters or just one cluster.\n", "# Automated ML on Azure Databricks\n",
"\n", "\n",
"**install azureml-sdk with Automated ML**\n", "In this example we use the scikit-learn's <a href=\"http://scikit-learn.org/stable/datasets/index.html#optical-recognition-of-handwritten-digits-dataset\" target=\"_blank\">digit dataset</a> to showcase how you can use AutoML for a simple classification problem.\n",
"* Source: Upload Python Egg or PyPi\n",
"* PyPi Name: `azureml-sdk[automl_databricks]`\n",
"* Select Install Library"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# AutoML : Classification with Local Compute on Azure DataBricks\n",
"\n",
"In this example we use the scikit-learn's [digit dataset](http://scikit-learn.org/stable/datasets/index.html#optical-recognition-of-handwritten-digits-dataset) to showcase how you can use AutoML for a simple classification problem.\n",
"\n", "\n",
"In this notebook you will learn how to:\n", "In this notebook you will learn how to:\n",
"1. Create Azure Machine Learning Workspace object and initialize your notebook directory to easily reload this object from a configuration file.\n", "1. Create Azure Machine Learning Workspace object and initialize your notebook directory to easily reload this object from a configuration file.\n",
"2. Create an `Experiment` in an existing `Workspace`.\n", "2. Create an `Experiment` in an existing `Workspace`.\n",
"3. Configure AutoML using `AutoMLConfig`.\n", "3. Configure Automated ML using `AutoMLConfig`.\n",
"4. Train the model using AzureDataBricks.\n", "4. Train the model using Azure Databricks.\n",
"5. Explore the results.\n", "5. Explore the results.\n",
"6. Test the best fitted model.\n", "6. Test the best fitted model.\n",
"\n", "\n",
"Prerequisites:\n", "Before running this notebook, please follow the <a href=\"https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks\" target=\"_blank\">readme for using Automated ML on Azure Databricks</a> for installing necessary libraries to your cluster."
"Before running this notebook, please follow the readme for installing necessary libraries to your cluster."
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Register Machine Learning Services Resource Provider\n", "We support installing AML SDK with Automated ML as library from GUI. When attaching a library follow <a href=\"https://docs.databricks.com/user-guide/libraries.html\" target=\"_blank\">this link</a> and add the below string as your PyPi package. You can select the option to attach the library to all clusters or just one cluster.\n",
"Microsoft.MachineLearningServices only needs to be registed once in the subscription. To register it:\n", "\n",
"Start the Azure portal.\n", "**azureml-sdk with automated ml**\n",
"Select your All services and then Subscription.\n", "* Source: Upload Python Egg or PyPi\n",
"Select the subscription that you want to use.\n", "* PyPi Name: `azureml-sdk[automl_databricks]`\n",
"Click on Resource providers\n", "* Select Install Library"
"Click the Register link next to Microsoft.MachineLearningServices"
] ]
}, },
{ {
@@ -97,11 +83,10 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"##PUBLISHONLY\n", "subscription_id = \"<Your SubscriptionId>\" #you should be owner or contributor\n",
"#subscription_id = \"<Your SubscriptionId>\"\n", "resource_group = \"<Resource group - new or existing>\" #you should be owner or contributor\n",
"#resource_group = \"<Resource group - new or existing>\"\n", "workspace_name = \"<workspace to be created>\" #your workspace name\n",
"#workspace_name = \"<workspace to be created>\"\n", "workspace_region = \"<azureregion>\" #your region"
"#workspace_region = \"<azureregion>\" #eg. eastus2, westcentralus, westeurope"
] ]
}, },
{ {
@@ -121,34 +106,6 @@
"**Note:** Creation of a new workspace can take several minutes." "**Note:** Creation of a new workspace can take several minutes."
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##TESTONLY\n",
"# import auth creds from notebook parameters\n",
"tenant = dbutils.widgets.get('tenant_id')\n",
"username = dbutils.widgets.get('service_principal_id')\n",
"password = dbutils.widgets.get('service_principal_password')\n",
"\n",
"auth = azureml.core.authentication.ServicePrincipalAuthentication(tenant, username, password)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##TESTONLY\n",
"subscription_id = dbutils.widgets.get('subscription_id')\n",
"resource_group = dbutils.widgets.get('resource_group')\n",
"workspace_name = dbutils.widgets.get('workspace_name')\n",
"workspace_region = dbutils.widgets.get('workspace_region')"
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
@@ -161,8 +118,7 @@
"ws = Workspace.create(name = workspace_name,\n", "ws = Workspace.create(name = workspace_name,\n",
" subscription_id = subscription_id,\n", " subscription_id = subscription_id,\n",
" resource_group = resource_group, \n", " resource_group = resource_group, \n",
" location = workspace_region,\n", " location = workspace_region, \n",
" auth = auth,\n",
" exist_ok=True)\n", " exist_ok=True)\n",
"ws.get_details()" "ws.get_details()"
] ]
@@ -172,22 +128,7 @@
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": []
"##PUBLISHONLY\n",
"#from azureml.core import Workspace\n",
"#import azureml.core\n",
"#\n",
"## Check core SDK version number\n",
"#print(\"SDK version:\", azureml.core.VERSION)\n",
"#\n",
"##'''\n",
"#ws = Workspace.from_config()\n",
"#print('Workspace name: ' + ws.name, \n",
"# 'Azure region: ' + ws.location, \n",
"# 'Subscription id: ' + ws.subscription_id, \n",
"# 'Resource group: ' + ws.resource_group, sep = '\\n')\n",
"##'''"
]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
@@ -203,35 +144,16 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"##TESTONLY\n",
"from azureml.core import Workspace\n", "from azureml.core import Workspace\n",
"\n", "\n",
"ws = Workspace(workspace_name = workspace_name,\n", "ws = Workspace(workspace_name = workspace_name,\n",
" subscription_id = subscription_id,\n", " subscription_id = subscription_id,\n",
" resource_group = resource_group,\n", " resource_group = resource_group)\n",
" auth = auth)\n",
"\n", "\n",
"# Persist the subscription id, resource group name, and workspace name in aml_config/config.json.\n", "# Persist the subscription id, resource group name, and workspace name in aml_config/config.json.\n",
"ws.write_config()" "ws.write_config()"
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##PUBLISHONLY\n",
"#from azureml.core import Workspace\n",
"#\n",
"#ws = Workspace(workspace_name = workspace_name,\n",
"# subscription_id = subscription_id,\n",
"# resource_group = resource_group)\n",
"#\n",
"## Persist the subscription id, resource group name, and workspace name in aml_config/config.json.\n",
"#ws.write_config()"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -262,7 +184,7 @@
"source": [ "source": [
"## Create an Experiment\n", "## Create an Experiment\n",
"\n", "\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments." "As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
] ]
}, },
{ {
@@ -288,26 +210,6 @@
"from azureml.train.automl.run import AutoMLRun" "from azureml.train.automl.run import AutoMLRun"
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##TESTONLY\n",
"ws = Workspace.from_config(auth = auth)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##PUBLISHONLY\n",
"#ws = Workspace.from_config(auth = auth)"
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
@@ -364,6 +266,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"#Automated ML requires a dataflow, which is different from dataframe.\n",
"#If your data is in a dataframe, please use read_pandas_dataframe to convert a dataframe to dataflow before usind dprep.\n",
"\n",
"import azureml.dataprep as dprep\n", "import azureml.dataprep as dprep\n",
"# You can use `auto_read_file` which intelligently figures out delimiters and datatypes of a file.\n", "# You can use `auto_read_file` which intelligently figures out delimiters and datatypes of a file.\n",
"# The data referenced here was pulled from `sklearn.datasets.load_digits()`.\n", "# The data referenced here was pulled from `sklearn.datasets.load_digits()`.\n",
@@ -435,7 +340,6 @@
" spark_context=sc, #databricks/spark related\n", " spark_context=sc, #databricks/spark related\n",
" X = X_train, \n", " X = X_train, \n",
" y = y_train,\n", " y = y_train,\n",
" enable_cache=False,\n",
" path = project_folder)" " path = project_folder)"
] ]
}, },
@@ -480,7 +384,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(local_run.get_portal_url())" "displayHTML(\"<a href={} target='_blank'>Your experiment in Azure Portal: {}</a>\".format(local_run.get_portal_url(), local_run.id))"
] ]
}, },
{ {
@@ -608,7 +512,9 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"When deploying an automated ML trained model, please specify _pip_packages=['azureml-sdk[automl]']_ in your CondaDependencies." "When deploying an automated ML trained model, please specify _pippackages=['azureml-sdk[automl]']_ in your CondaDependencies.\n",
"\n",
"Please refer to only the **Deploy** section in this notebook - <a href=\"https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/classification-with-deployment\" target=\"_blank\">Deployment of Automated ML trained model</a>"
] ]
}, },
{ {
@@ -629,9 +535,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
@@ -646,8 +552,8 @@
"version": "3.7.0" "version": "3.7.0"
}, },
"name": "auto-ml-classification-local-adb", "name": "auto-ml-classification-local-adb",
"notebookId": 3836944406456411 "notebookId": 817220787969977
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 1 "nbformat_minor": 0
} }

View File

@@ -0,0 +1,704 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We support installing AML SDK as library from GUI. When attaching a library follow this https://docs.databricks.com/user-guide/libraries.html and add the below string as your PyPi package. You can select the option to attach the library to all clusters or just one cluster.\n",
"\n",
"**install azureml-sdk with Automated ML**\n",
"* Source: Upload Python Egg or PyPi\n",
"* PyPi Name: `azureml-sdk[automl_databricks]`\n",
"* Select Install Library"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# AutoML : Classification with Local Compute on Azure DataBricks with deployment to ACI\n",
"\n",
"In this example we use the scikit-learn's [digit dataset](http://scikit-learn.org/stable/datasets/index.html#optical-recognition-of-handwritten-digits-dataset) to showcase how you can use AutoML for a simple classification problem.\n",
"\n",
"In this notebook you will learn how to:\n",
"1. Create Azure Machine Learning Workspace object and initialize your notebook directory to easily reload this object from a configuration file.\n",
"2. Create an `Experiment` in an existing `Workspace`.\n",
"3. Configure AutoML using `AutoMLConfig`.\n",
"4. Train the model using AzureDataBricks.\n",
"5. Explore the results.\n",
"6. Register the model.\n",
"7. Deploy the model.\n",
"8. Test the best fitted model.\n",
"\n",
"Prerequisites:\n",
"Before running this notebook, please follow the readme for installing necessary libraries to your cluster."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Register Machine Learning Services Resource Provider\n",
"Microsoft.MachineLearningServices only needs to be registed once in the subscription. To register it:\n",
"Start the Azure portal.\n",
"Select your All services and then Subscription.\n",
"Select the subscription that you want to use.\n",
"Click on Resource providers\n",
"Click the Register link next to Microsoft.MachineLearningServices"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Check the Azure ML Core SDK Version to Validate Your Installation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"\n",
"print(\"SDK Version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize an Azure ML Workspace\n",
"### What is an Azure ML Workspace and Why Do I Need One?\n",
"\n",
"An Azure ML workspace is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, operationalization, and the monitoring of operationalized models.\n",
"\n",
"\n",
"### What do I Need?\n",
"\n",
"To create or access an Azure ML workspace, you will need to import the Azure ML library and specify following information:\n",
"* A name for your workspace. You can choose one.\n",
"* Your subscription id. Use the `id` value from the `az account show` command output above.\n",
"* The resource group name. The resource group organizes Azure resources and provides a default region for the resources in the group. The resource group will be created if it doesn't exist. Resource groups can be created and viewed in the [Azure portal](https://portal.azure.com)\n",
"* Supported regions include `eastus2`, `eastus`,`westcentralus`, `southeastasia`, `westeurope`, `australiaeast`, `westus2`, `southcentralus`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"subscription_id = \"<Your SubscriptionId>\"\n",
"resource_group = \"<Resource group - new or existing>\"\n",
"workspace_name = \"<workspace to be created>\"\n",
"workspace_region = \"<azureregion>\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Creating a Workspace\n",
"If you already have access to an Azure ML workspace you want to use, you can skip this cell. Otherwise, this cell will create an Azure ML workspace for you in the specified subscription, provided you have the correct permissions for the given `subscription_id`.\n",
"\n",
"This will fail when:\n",
"1. The workspace already exists.\n",
"2. You do not have permission to create a workspace in the resource group.\n",
"3. You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription.\n",
"\n",
"If workspace creation fails for any reason other than already existing, please work with your IT administrator to provide you with the appropriate permissions or to provision the required resources.\n",
"\n",
"**Note:** Creation of a new workspace can take several minutes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Import the Workspace class and check the Azure ML SDK version.\n",
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.create(name = workspace_name,\n",
" subscription_id = subscription_id,\n",
" resource_group = resource_group, \n",
" location = workspace_region,\n",
" exist_ok=True)\n",
"ws.get_details()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Configuring Your Local Environment\n",
"You can validate that you have access to the specified workspace and write a configuration file to the default configuration location, `./aml_config/config.json`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace(workspace_name = workspace_name,\n",
" subscription_id = subscription_id,\n",
" resource_group = resource_group)\n",
"\n",
"# Persist the subscription id, resource group name, and workspace name in aml_config/config.json.\n",
"ws.write_config()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create a Folder to Host Sample Projects\n",
"Finally, create a folder where all the sample projects will be hosted."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"sample_projects_folder = './sample_projects'\n",
"\n",
"if not os.path.isdir(sample_projects_folder):\n",
" os.mkdir(sample_projects_folder)\n",
" \n",
"print('Sample projects will be created in {}.'.format(sample_projects_folder))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create an Experiment\n",
"\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"import os\n",
"import random\n",
"import time\n",
"\n",
"from matplotlib import pyplot as plt\n",
"from matplotlib.pyplot import imshow\n",
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n",
"from azureml.train.automl.run import AutoMLRun"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Choose a name for the experiment and specify the project folder.\n",
"experiment_name = 'automl-local-classification'\n",
"project_folder = './sample_projects/automl-local-classification'\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace Name'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data = output, index = ['']).T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Diagnostics\n",
"\n",
"Opt-in diagnostics for better experience, quality, and security of future releases."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.telemetry import set_diagnostics_collection\n",
"set_diagnostics_collection(send_diagnostics = True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Training Data Using DataPrep"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.dataprep as dprep\n",
"# You can use `auto_read_file` which intelligently figures out delimiters and datatypes of a file.\n",
"# The data referenced here was pulled from `sklearn.datasets.load_digits()`.\n",
"simple_example_data_root = 'https://dprepdata.blob.core.windows.net/automl-notebook-data/'\n",
"X_train = dprep.auto_read_file(simple_example_data_root + 'X.csv').skip(1) # Remove the header row.\n",
"\n",
"# You can also use `read_csv` and `to_*` transformations to read (with overridable delimiter)\n",
"# and convert column types manually.\n",
"# Here we read a comma delimited file and convert all columns to integers.\n",
"y_train = dprep.read_csv(simple_example_data_root + 'y.csv').to_long(dprep.ColumnSelector(term='.*', use_regex = True))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Review the Data Preparation Result\n",
"You can peek the result of a Dataflow at any range using skip(i) and head(j). Doing so evaluates only j records for all the steps in the Dataflow, which makes it fast even against large datasets."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_train.skip(1).head(5)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Configure AutoML\n",
"\n",
"Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|classification or regression|\n",
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
"|**primary_metric**|This is the metric that you want to optimize. Regression supports the following primary metrics: <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>|\n",
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**spark_context**|Spark Context object. for Databricks, use spark_context=sc|\n",
"|**max_concurrent_iterations**|Maximum number of iterations to execute in parallel. This should be <= number of worker nodes in your Azure Databricks cluster.|\n",
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|\n",
"|**preprocess**|set this to True to enable pre-processing of data eg. string to numeric using one-hot encoding|\n",
"|**exit_score**|Target score for experiment. It is associated with the metric. eg. exit_score=0.995 will exit experiment after that|"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_config = AutoMLConfig(task = 'classification',\n",
" debug_log = 'automl_errors.log',\n",
" primary_metric = 'AUC_weighted',\n",
" iteration_timeout_minutes = 10,\n",
" iterations = 5,\n",
" n_cross_validations = 2,\n",
" max_concurrent_iterations = 4, #change it based on number of worker nodes\n",
" verbosity = logging.INFO,\n",
" spark_context=sc, #databricks/spark related\n",
" X = X_train, \n",
" y = y_train,\n",
" enable_cache=False,\n",
" path = project_folder)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train the Models\n",
"\n",
"Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.\n",
"In this example, we specify `show_output = True` to print currently running iterations to the console."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run = experiment.submit(automl_config, show_output = True) # for higher runs please use show_output=False and use the below"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Explore the Results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Portal URL for Monitoring Runs\n",
"\n",
"The following will provide a link to the web interface to explore individual run details and status. In the future we might support output displayed in the notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"displayHTML(\"<a href={} target='_blank'>Azure Portal: {}</a>\".format(local_run.get_portal_url(), local_run.id))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following will show the child runs and waits for the parent run to complete."
]
},
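{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of blocking until the parent run finishes, assuming `local_run` is the Automated ML run submitted above (optional if you already used `show_output = True` when submitting):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Block until the parent AutoML run has finished before querying its child runs.\n",
"local_run.wait_for_completion(show_output = False)"
]
},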
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Retrieve All Child Runs after the experiment is completed (in portal)\n",
"You can also use SDK methods to fetch all the child runs and see individual metrics that we log."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"children = list(local_run.get_children())\n",
"metricslist = {}\n",
"for run in children:\n",
" properties = run.get_properties()\n",
" metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)} \n",
" metricslist[int(properties['iteration'])] = metrics\n",
"\n",
"rundata = pd.DataFrame(metricslist).sort_index(1)\n",
"rundata"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Model after the above run is complete \n",
"\n",
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = local_run.get_output()\n",
"print(best_run)\n",
"print(fitted_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Best Model Based on Any Other Metric after the above run is complete based on the child run\n",
"Show the run and the model that has the smallest `log_loss` value:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"lookup_metric = \"log_loss\"\n",
"best_run, fitted_model = local_run.get_output(metric = lookup_metric)\n",
"print(best_run)\n",
"print(fitted_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Register the Fitted Model for Deployment\n",
"If neither metric nor iteration are specified in the register_model call, the iteration with the best primary metric is registered."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"description = 'AutoML Model'\n",
"tags = None\n",
"model = local_run.register_model(description = description, tags = tags)\n",
"local_run.model_id # This will be written to the scoring script file later in the notebook."
]
},
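{
"cell_type": "markdown",
"metadata": {},
"source": [
"As noted above, `register_model` also accepts a metric or an iteration. A minimal sketch of registering the model from the iteration that is best on a specific metric; the metric name here is only an example:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Register the model from the iteration with the best (smallest) log_loss,\n",
"# instead of the iteration with the best primary metric.\n",
"model = local_run.register_model(description = 'AutoML model, best log_loss',\n",
"                                 metric = 'log_loss')"
]
},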
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create Scoring Script\n",
"Replace model_id with name of model from output of above register cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%writefile score.py\n",
"import pickle\n",
"import json\n",
"import numpy\n",
"import azureml.train.automl\n",
"from sklearn.externals import joblib\n",
"from azureml.core.model import Model\n",
"\n",
"\n",
"def init():\n",
" global model\n",
" model_path = Model.get_model_path(model_name = '<<modelid>>') # this name is model.id of model that we want to deploy\n",
" # deserialize the model file back into a sklearn model\n",
" model = joblib.load(model_path)\n",
"\n",
"def run(rawdata):\n",
" try:\n",
" data = json.loads(rawdata)['data']\n",
" data = numpy.array(data)\n",
" result = model.predict(data)\n",
" except Exception as e:\n",
" result = str(e)\n",
" return json.dumps({\"error\": result})\n",
" return json.dumps({\"result\":result.tolist()})"
]
},
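{
"cell_type": "markdown",
"metadata": {},
"source": [
"Instead of editing `score.py` by hand, you can substitute the registered model id programmatically. A minimal sketch, assuming the file was written by the cell above and that `local_run.model_id` holds the registered model's id:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Replace the <<modelid>> placeholder in score.py with the registered model id.\n",
"with open('score.py', 'r') as f:\n",
"    content = f.read()\n",
"with open('score.py', 'w') as f:\n",
"    f.write(content.replace('<<modelid>>', local_run.model_id))"
]
},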
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Create a YAML File for the Environment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'], pip_packages=['azureml-sdk[automl]'])\n",
"\n",
"conda_env_file_name = 'mydeployenv.yml'\n",
"myenv.save_to_file('.', conda_env_file_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Create ACI config"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#deploy to ACI\n",
"from azureml.core.webservice import AciWebservice, Webservice\n",
"\n",
"myaci_config = AciWebservice.deploy_configuration(\n",
" cpu_cores = 2, \n",
" memory_gb = 2, \n",
" tags = {'name':'Databricks Azure ML ACI'}, \n",
" description = 'This is for ADB and AutoML example.')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploy the Image as a Web Service on Azure Container Instance\n",
"Replace servicename with any meaningful name of service"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"# this will take 10-15 minutes to finish\n",
"\n",
"service_name = \"<<servicename>>\"\n",
"runtime = \"spark-py\" \n",
"driver_file = \"score.py\"\n",
"my_conda_file = \"mydeployenv.yml\"\n",
"\n",
"# image creation\n",
"from azureml.core.image import ContainerImage\n",
"myimage_config = ContainerImage.image_configuration(execution_script = driver_file, \n",
" runtime = runtime, \n",
" conda_file = 'mydeployenv.yml')\n",
"\n",
"# Webservice creation\n",
"myservice = Webservice.deploy_from_model(\n",
" workspace=ws, \n",
" name=service_name,\n",
" deployment_config = myaci_config,\n",
" models = [model],\n",
" image_config = myimage_config\n",
" )\n",
"\n",
"myservice.wait_for_deployment(show_output=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#for using the Web HTTP API \n",
"print(myservice.scoring_uri)"
]
},
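{
"cell_type": "markdown",
"metadata": {},
"source": [
"Besides the raw HTTP endpoint, you can call the deployed service through the SDK. A minimal sketch; the all-zeros row is only a placeholder payload and must match the model's expected input shape (64 pixel values per row for the digits dataset):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"# Placeholder payload: one row of 64 features, matching the digits model input.\n",
"sample = json.dumps({'data': [[0.0] * 64]})\n",
"\n",
"# Call the deployed ACI web service directly from the SDK.\n",
"prediction = myservice.run(input_data = sample)\n",
"print(prediction)"
]
},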
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Test the Best Fitted Model\n",
"\n",
"#### Load Test Data - you can split the dataset beforehand & pass Train dataset to AutoML and use Test dataset to evaluate the best model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn import datasets\n",
"digits = datasets.load_digits()\n",
"X_test = digits.data[:10, :]\n",
"y_test = digits.target[:10]\n",
"images = digits.images[:10]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Testing Our Best Fitted Model\n",
"We will try to predict digits and see how our model works. This is just an example to show you."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Randomly select digits and test.\n",
"for index in np.random.choice(len(y_test), 2, replace = False):\n",
" print(index)\n",
" predicted = fitted_model.predict(X_test[index:index + 1])[0]\n",
" label = y_test[index]\n",
" title = \"Label value = %d Predicted value = %d \" % (label, predicted)\n",
" fig = plt.figure(1, figsize = (3,3))\n",
" ax1 = fig.add_axes((0,0,.8,.8))\n",
" ax1.set_title(title)\n",
" plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')\n",
" display(fig)"
]
}
],
"metadata": {
"authors": [
{
"name": "savitam"
},
{
"name": "wamartin"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.0"
},
"name": "auto-ml-classification-local-adb",
"notebookId": 3888835968049288
},
"nbformat": 4,
"nbformat_minor": 0
}

View File

@@ -38,13 +38,11 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core import Workspace, Run\n", "from azureml.core import Workspace\n",
"from azureml.core.compute import AksCompute, ComputeTarget\n", "from azureml.core.compute import AksCompute, ComputeTarget\n",
"from azureml.core.webservice import Webservice, AksWebservice\n", "from azureml.core.webservice import AksWebservice\n",
"from azureml.core.image import Image\n",
"from azureml.core.model import Model\n",
"\n",
"import azureml.core\n", "import azureml.core\n",
"import json\n",
"print(azureml.core.VERSION)" "print(azureml.core.VERSION)"
] ]
}, },
@@ -247,7 +245,6 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"%%time\n", "%%time\n",
"import json\n",
"\n", "\n",
"test_sample = json.dumps({'data': [\n", "test_sample = json.dumps({'data': [\n",
" [1,28,13,45,54,6,57,8,8,10], \n", " [1,28,13,45,54,6,57,8,8,10], \n",
@@ -401,7 +398,6 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"%%time\n", "%%time\n",
"import json\n",
"\n", "\n",
"test_sample = json.dumps({'data': [\n", "test_sample = json.dumps({'data': [\n",
" [1,28,13,45,54,6,57,8,8,10], \n", " [1,28,13,45,54,6,57,8,8,10], \n",
@@ -469,13 +465,13 @@
"metadata": { "metadata": {
"authors": [ "authors": [
{ {
"name": "marthalc" "name": "jocier"
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python [default]", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
@@ -487,7 +483,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.6.5" "version": "3.6.3"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@@ -37,12 +37,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core import Workspace, Run\n", "from azureml.core import Workspace\n",
"from azureml.core.compute import AksCompute, ComputeTarget\n", "from azureml.core.compute import AksCompute, ComputeTarget\n",
"from azureml.core.webservice import Webservice, AksWebservice\n", "from azureml.core.webservice import Webservice, AksWebservice\n",
"from azureml.core.image import Image\n",
"from azureml.core.model import Model\n",
"\n",
"import azureml.core\n", "import azureml.core\n",
"print(azureml.core.VERSION)" "print(azureml.core.VERSION)"
] ]
@@ -51,8 +48,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## 2. Set up your configuration and create a workspace\n", "## 2. Set up your configuration and create a workspace"
"Follow Notebook 00 instructions to do this.\n"
] ]
}, },
{ {
@@ -277,9 +273,7 @@
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": { "metadata": {},
"scrolled": true
},
"source": [ "source": [
"```python \n", "```python \n",
" %%time\n", " %%time\n",
@@ -403,11 +397,11 @@
"source": [ "source": [
"### b. Connect Blob to Power Bi (Small Data only)\n", "### b. Connect Blob to Power Bi (Small Data only)\n",
"1. Download and Open PowerBi Desktop\n", "1. Download and Open PowerBi Desktop\n",
"2. Select Get Data and click on Azure Blob Storage >> Connect\n", "2. Select \"Get Data\" and click on \"Azure Blob Storage\" >> Connect\n",
"3. Add your storage account and enter your storage key.\n", "3. Add your storage account and enter your storage key.\n",
"4. Select the container where your Data Collection is stored and click on Edit. \n", "4. Select the container where your Data Collection is stored and click on Edit. \n",
"5. In the query editor, click under Name column and add your Storage account Model path into the filter. Note: if you want to only look into files from a specific year or month, just expand the filter path. For example, just look into March data: /modeldata/subscriptionid>/resourcegroupname>/workspacename>/webservicename>/modelname>/modelversion>/identifier>/year>/3\n", "5. In the query editor, click under \"Name\" column and add your Storage account Model path into the filter. Note: if you want to only look into files from a specific year or month, just expand the filter path. For example, just look into March data: /modeldata/subscriptionid>/resourcegroupname>/workspacename>/webservicename>/modelname>/modelversion>/identifier>/year>/3\n",
"6. Click on the double arrow aside the Content column to combine the files. \n", "6. Click on the double arrow aside the \"Content\" column to combine the files. \n",
"7. Click OK and the data will preload.\n", "7. Click OK and the data will preload.\n",
"8. You can now click Close and Apply and start building your custom reports on your Model Input data." "8. You can now click Close and Apply and start building your custom reports on your Model Input data."
] ]
@@ -451,13 +445,13 @@
"metadata": { "metadata": {
"authors": [ "authors": [
{ {
"name": "marthalc" "name": "jocier"
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python [default]", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
@@ -469,7 +463,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.6.5" "version": "3.6.3"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@@ -4,7 +4,7 @@ These tutorials show how to create and deploy Open Neural Network eXchange ([ONN
## Tutorials ## Tutorials
0. [Configure your Azure Machine Learning Workspace](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) 0. [Configure your Azure Machine Learning Workspace](../../../configuration.ipynb)
#### Obtain models from the [ONNX Model Zoo](https://github.com/onnx/models) and deploy with ONNX Runtime Inference #### Obtain models from the [ONNX Model Zoo](https://github.com/onnx/models) and deploy with ONNX Runtime Inference
1. [Handwritten Digit Classification (MNIST)](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.ipynb) 1. [Handwritten Digit Classification (MNIST)](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.ipynb)

View File

@@ -33,7 +33,7 @@
"To make the best use of your time, make sure you have done the following:\n", "To make the best use of your time, make sure you have done the following:\n",
"\n", "\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n", "* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
"* Go through the [00.configuration.ipynb](../00.configuration.ipynb) notebook to:\n", "* Go through the [configuration](../../../configuration.ipynb) notebook to:\n",
" * install the AML SDK\n", " * install the AML SDK\n",
" * create a workspace and its configuration file (config.json)" " * create a workspace and its configuration file (config.json)"
] ]
@@ -71,7 +71,7 @@
"source": [ "source": [
"## Convert model to ONNX\n", "## Convert model to ONNX\n",
"\n", "\n",
"First we download the CoreML model. We use the CoreML model listed at https://coreml.store/tinyyolo. This may take a few minutes." "First we download the CoreML model. We use the CoreML model from [Matthijs Hollemans's tutorial](https://github.com/hollance/YOLO-CoreML-MPSNNGraph). This may take a few minutes."
] ]
}, },
{ {
@@ -82,8 +82,8 @@
"source": [ "source": [
"import urllib.request\n", "import urllib.request\n",
"\n", "\n",
"onnx_model_url = \"https://s3-us-west-2.amazonaws.com/coreml-models/TinyYOLO.mlmodel\"\n", "coreml_model_url = \"https://github.com/hollance/YOLO-CoreML-MPSNNGraph/raw/master/TinyYOLO-CoreML/TinyYOLO-CoreML/TinyYOLO.mlmodel\"\n",
"urllib.request.urlretrieve(onnx_model_url, filename=\"TinyYOLO.mlmodel\")\n" "urllib.request.urlretrieve(coreml_model_url, filename=\"TinyYOLO.mlmodel\")\n"
] ]
}, },
{ {
@@ -409,7 +409,7 @@
"metadata": { "metadata": {
"authors": [ "authors": [
{ {
"name": "onnx" "name": "viswamy"
} }
], ],
"kernelspec": { "kernelspec": {
@@ -427,7 +427,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.5.6" "version": "3.6.5"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@@ -34,7 +34,7 @@
"## Prerequisites\n", "## Prerequisites\n",
"\n", "\n",
"### 1. Install Azure ML SDK and create a new workspace\n", "### 1. Install Azure ML SDK and create a new workspace\n",
"Please follow [Azure ML configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/00.configuration.ipynb) to set up your environment.\n", "Please follow [Azure ML configuration notebook](../../../configuration.ipynb) to set up your environment.\n",
"\n", "\n",
"### 2. Install additional packages needed for this Notebook\n", "### 2. Install additional packages needed for this Notebook\n",
"You need to install the popular plotting library `matplotlib`, the image manipulation library `opencv`, and the `onnx` library in the conda environment where Azure Maching Learning SDK is installed.\n", "You need to install the popular plotting library `matplotlib`, the image manipulation library `opencv`, and the `onnx` library in the conda environment where Azure Maching Learning SDK is installed.\n",
@@ -197,7 +197,6 @@
"source": [ "source": [
"# for images and plots in this notebook\n", "# for images and plots in this notebook\n",
"import matplotlib.pyplot as plt \n", "import matplotlib.pyplot as plt \n",
"from IPython.display import Image\n",
"\n", "\n",
"# display images inline\n", "# display images inline\n",
"%matplotlib inline" "%matplotlib inline"
@@ -481,8 +480,8 @@
" \n", " \n",
" emotion_keys = list(emotion_table.keys())\n", " emotion_keys = list(emotion_table.keys())\n",
" emotions = []\n", " emotions = []\n",
" for i in range(N):\n", " for c in range(N):\n",
" emotions.append(emotion_keys[classes[i]])\n", " emotions.append(emotion_keys[classes[c]])\n",
" return emotions\n", " return emotions\n",
"\n", "\n",
"def softmax(x):\n", "def softmax(x):\n",
@@ -534,9 +533,9 @@
"# read in 3 testing images from .pb files\n", "# read in 3 testing images from .pb files\n",
"test_data_size = 3\n", "test_data_size = 3\n",
"\n", "\n",
"for i in np.arange(test_data_size):\n", "for num in np.arange(test_data_size):\n",
" input_test_data = os.path.join(model_dir, 'test_data_set_{0}'.format(i), 'input_0.pb')\n", " input_test_data = os.path.join(model_dir, 'test_data_set_{0}'.format(num), 'input_0.pb')\n",
" output_test_data = os.path.join(model_dir, 'test_data_set_{0}'.format(i), 'output_0.pb')\n", " output_test_data = os.path.join(model_dir, 'test_data_set_{0}'.format(num), 'output_0.pb')\n",
" \n", " \n",
" # convert protobuf tensors to np arrays using the TensorProto reader from ONNX\n", " # convert protobuf tensors to np arrays using the TensorProto reader from ONNX\n",
" tensor = onnx.TensorProto()\n", " tensor = onnx.TensorProto()\n",
@@ -671,19 +670,19 @@
" \"\"\"Convert the input image into grayscale\"\"\"\n", " \"\"\"Convert the input image into grayscale\"\"\"\n",
" return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\n", " return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\n",
"\n", "\n",
"def resize_img(img):\n", "def resize_img(img_to_resize):\n",
" \"\"\"Resize image to MNIST model input dimensions\"\"\"\n", " \"\"\"Resize image to FER+ model input dimensions\"\"\"\n",
" img = cv2.resize(img, dsize=(64, 64), interpolation=cv2.INTER_AREA)\n", " r_img = cv2.resize(img_to_resize, dsize=(64, 64), interpolation=cv2.INTER_AREA)\n",
" img.resize((1, 1, 64, 64))\n", " r_img.resize((1, 1, 64, 64))\n",
" return img\n", " return r_img\n",
"\n", "\n",
"def preprocess(img):\n", "def preprocess(img_to_preprocess):\n",
" \"\"\"Resize input images and convert them to grayscale.\"\"\"\n", " \"\"\"Resize input images and convert them to grayscale.\"\"\"\n",
" if img.shape == (64, 64):\n", " if img_to_preprocess.shape == (64, 64):\n",
" img.resize((1, 1, 64, 64))\n", " img_to_preprocess.resize((1, 1, 64, 64))\n",
" return img\n", " return img_to_preprocess\n",
" \n", " \n",
" grayscale = rgb2gray(img)\n", " grayscale = rgb2gray(img_to_preprocess)\n",
" processed_img = resize_img(grayscale)\n", " processed_img = resize_img(grayscale)\n",
" return processed_img" " return processed_img"
] ]
@@ -732,7 +731,7 @@
" r = json.loads(aci_service.run(input_data))\n", " r = json.loads(aci_service.run(input_data))\n",
" result = r['result'][0]\n", " result = r['result'][0]\n",
" time_ms = np.round(r['time_in_sec'][0] * 1000, 2)\n", " time_ms = np.round(r['time_in_sec'][0] * 1000, 2)\n",
" except Exception as e:\n", " except KeyError as e:\n",
" print(str(e))\n", " print(str(e))\n",
"\n", "\n",
" plt.figure(figsize = (16, 6))\n", " plt.figure(figsize = (16, 6))\n",
@@ -800,7 +799,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.6.6" "version": "3.6.5"
}, },
"msauthor": "vinitra.swamy" "msauthor": "vinitra.swamy"
}, },
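
Before deploying to ACI, the preprocessing helpers above can be sanity-checked locally with ONNX Runtime. This is only a sketch under assumptions: the model file name and the dummy input are stand-ins, while `preprocess`, `softmax` and `emotion_keys` are the helpers defined in the notebook above.

```python
import numpy as np
import onnxruntime as rt

# open a local inference session on the FER+ model (file name is an assumed stand-in)
sess = rt.InferenceSession("model.onnx")
input_name = sess.get_inputs()[0].name

# a random 64x64 grayscale array stands in for a real face image
dummy_face = np.random.rand(64, 64).astype(np.float32)
input_tensor = preprocess(dummy_face).astype(np.float32)   # -> shape (1, 1, 64, 64)

raw_scores = sess.run(None, {input_name: input_tensor})[0]
probabilities = softmax(np.array(raw_scores).flatten())
print("predicted emotion:", emotion_keys[int(np.argmax(probabilities))])
```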

View File

@@ -34,7 +34,7 @@
"## Prerequisites\n", "## Prerequisites\n",
"\n", "\n",
"### 1. Install Azure ML SDK and create a new workspace\n", "### 1. Install Azure ML SDK and create a new workspace\n",
"Please follow [Azure ML configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/00.configuration.ipynb) to set up your environment.\n", "Please follow [Azure ML configuration notebook](../../../configuration.ipynb) to set up your environment.\n",
"\n", "\n",
"### 2. Install additional packages needed for this tutorial notebook\n", "### 2. Install additional packages needed for this tutorial notebook\n",
"You need to install the popular plotting library `matplotlib`, the image manipulation library `opencv`, and the `onnx` library in the conda environment where Azure Maching Learning SDK is installed. \n", "You need to install the popular plotting library `matplotlib`, the image manipulation library `opencv`, and the `onnx` library in the conda environment where Azure Maching Learning SDK is installed. \n",
@@ -621,19 +621,19 @@
" \"\"\"Convert the input image into grayscale\"\"\"\n", " \"\"\"Convert the input image into grayscale\"\"\"\n",
" return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\n", " return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\n",
"\n", "\n",
"def resize_img(img):\n", "def resize_img(img_to_resize):\n",
" \"\"\"Resize image to MNIST model input dimensions\"\"\"\n", " \"\"\"Resize image to MNIST model input dimensions\"\"\"\n",
" img = cv2.resize(img, dsize=(28, 28), interpolation=cv2.INTER_AREA)\n", " r_img = cv2.resize(img_to_resize, dsize=(28, 28), interpolation=cv2.INTER_AREA)\n",
" img.resize((1, 1, 28, 28))\n", " r_img.resize((1, 1, 28, 28))\n",
" return img\n", " return r_img\n",
"\n", "\n",
"def preprocess(img):\n", "def preprocess(img_to_preprocess):\n",
" \"\"\"Resize input images and convert them to grayscale.\"\"\"\n", " \"\"\"Resize input images and convert them to grayscale.\"\"\"\n",
" if img.shape == (28, 28):\n", " if img_to_preprocess.shape == (28, 28):\n",
" img.resize((1, 1, 28, 28))\n", " img_to_preprocess.resize((1, 1, 28, 28))\n",
" return img\n", " return img_to_preprocess\n",
" \n", " \n",
" grayscale = rgb2gray(img)\n", " grayscale = rgb2gray(img_to_preprocess)\n",
" processed_img = resize_img(grayscale)\n", " processed_img = resize_img(grayscale)\n",
" return processed_img" " return processed_img"
] ]
@@ -681,7 +681,7 @@
" r = aci_service.run(input_data)\n", " r = aci_service.run(input_data)\n",
" result = r['result']\n", " result = r['result']\n",
" time_ms = np.round(r['time_in_sec'] * 1000, 2)\n", " time_ms = np.round(r['time_in_sec'] * 1000, 2)\n",
" except Exception as e:\n", " except KeyError as e:\n",
" print(str(e))\n", " print(str(e))\n",
"\n", "\n",
" plt.figure(figsize = (16, 6))\n", " plt.figure(figsize = (16, 6))\n",

View File

@@ -33,7 +33,7 @@
"To make the best use of your time, make sure you have done the following:\n", "To make the best use of your time, make sure you have done the following:\n",
"\n", "\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n", "* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
"* Go through the [00.configuration.ipynb](../00.configuration.ipynb) notebook to:\n", "* Go through the [configuration notebook](../../../configuration.ipynb) to:\n",
" * install the AML SDK\n", " * install the AML SDK\n",
" * create a workspace and its configuration file (config.json)" " * create a workspace and its configuration file (config.json)"
] ]
@@ -393,7 +393,7 @@
"metadata": { "metadata": {
"authors": [ "authors": [
{ {
"name": "onnx" "name": "viswamy"
} }
], ],
"kernelspec": { "kernelspec": {
@@ -411,7 +411,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.5.6" "version": "3.6.5"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@@ -13,7 +13,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## 10. Register Model, Create Image and Deploy Service\n", "## Register Model, Create Image and Deploy Service\n",
"\n", "\n",
"This example shows how to deploy a web service in step-by-step fashion:\n", "This example shows how to deploy a web service in step-by-step fashion:\n",
"\n", "\n",
@@ -24,9 +24,9 @@
" 5. Deploy the image as web service\n", " 5. Deploy the image as web service\n",
" \n", " \n",
"**IMPORTANT**:\n", "**IMPORTANT**:\n",
" * This notebook requires you to first complete \"01.SDK-101-Train-and-Deploy-to-ACI.ipynb\" Notebook\n", " * This notebook requires you to first complete [train-within-notebook](../../training/train-within-notebook/train-within-notebook.ipynb) example\n",
" \n", " \n",
"The 101 Notebook taught you how to deploy a web service directly from model in one step. This Notebook shows a more advanced approach that gives you more control over model versions and Docker image versions. " "The train-within-notebook example taught you how to deploy a web service directly from model in one step. This Notebook shows a more advanced approach that gives you more control over model versions and Docker image versions. "
] ]
}, },
{ {
@@ -34,7 +34,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Prerequisites\n", "## Prerequisites\n",
"Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) Notebook first if you haven't." "Make sure you go through the [configuration](../../../configuration.ipynb) Notebook first if you haven't."
] ]
}, },
{ {
@@ -241,7 +241,8 @@
" description = \"Image with ridge regression model\")\n", " description = \"Image with ridge regression model\")\n",
"\n", "\n",
"image = Image.create(name = \"myimage1\",\n", "image = Image.create(name = \"myimage1\",\n",
" # this is the model object \n", " # this is the model object. note you can pass in 0-n models via this list-type parameter\n",
" # in case you need to reference multiple models, or none at all, in your scoring script.\n",
" models = [model],\n", " models = [model],\n",
" image_config = image_config, \n", " image_config = image_config, \n",
" workspace = ws)" " workspace = ws)"

View File

@@ -44,6 +44,9 @@ In this directory, there are two types of notebooks:
4. [aml-pipelines-data-transfer.ipynb](https://aka.ms/pl-data-trans) 4. [aml-pipelines-data-transfer.ipynb](https://aka.ms/pl-data-trans)
5. [aml-pipelines-use-databricks-as-compute-target.ipynb](https://aka.ms/pl-databricks) 5. [aml-pipelines-use-databricks-as-compute-target.ipynb](https://aka.ms/pl-databricks)
6. [aml-pipelines-use-adla-as-compute-target.ipynb](https://aka.ms/pl-adla) 6. [aml-pipelines-use-adla-as-compute-target.ipynb](https://aka.ms/pl-adla)
7. [aml-pipelines-parameter-tuning-with-hyperdrive.ipynb](https://aka.ms/pl-hyperdrive)
8. [aml-pipelines-how-to-use-azurebatch-to-run-a-windows-executable.ipynb](https://aka.ms/pl-azbatch)
9. [aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb](https://aka.ms/pl-schedule)
* The second type of notebooks illustrate more sophisticated scenarios, and are independent of each other. These notebooks include: * The second type of notebooks illustrate more sophisticated scenarios, and are independent of each other. These notebooks include:

View File

@@ -17,7 +17,7 @@
"\n", "\n",
"In certain cases, you will need to transfer data from one data location to another. For example, your data may be in Files storage and you may want to move it to Blob storage. Or, if your data is in an ADLS account and you want to make it available in the Blob storage. The built-in **DataTransferStep** class helps you transfer data in these situations.\n", "In certain cases, you will need to transfer data from one data location to another. For example, your data may be in Files storage and you may want to move it to Blob storage. Or, if your data is in an ADLS account and you want to make it available in the Blob storage. The built-in **DataTransferStep** class helps you transfer data in these situations.\n",
"\n", "\n",
"The below example shows how to move data in an ADLS account to Blob storage." "The below example shows how to move data between an ADLS account, Blob storage, SQL Server, PostgreSQL server. "
] ]
}, },
{ {
@@ -35,16 +35,12 @@
"source": [ "source": [
"import os\n", "import os\n",
"import azureml.core\n", "import azureml.core\n",
"from azureml.core.compute import ComputeTarget, DatabricksCompute, DataFactoryCompute\n", "from azureml.core.compute import ComputeTarget, DataFactoryCompute\n",
"from azureml.exceptions import ComputeTargetException\n", "from azureml.exceptions import ComputeTargetException\n",
"from azureml.core import Workspace, Run, Experiment\n", "from azureml.core import Workspace, Experiment\n",
"from azureml.pipeline.core import Pipeline, PipelineData\n", "from azureml.pipeline.core import Pipeline\n",
"from azureml.pipeline.steps import AdlaStep\n",
"from azureml.core.datastore import Datastore\n", "from azureml.core.datastore import Datastore\n",
"from azureml.data.data_reference import DataReference\n", "from azureml.data.data_reference import DataReference\n",
"from azureml.data.sql_data_reference import SqlDataReference\n",
"from azureml.core import attach_legacy_compute_target\n",
"from azureml.data.stored_procedure_parameter import StoredProcedureParameter, StoredProcedureParameterType\n",
"from azureml.pipeline.steps import DataTransferStep\n", "from azureml.pipeline.steps import DataTransferStep\n",
"\n", "\n",
"# Check core SDK version number\n", "# Check core SDK version number\n",
@@ -89,7 +85,9 @@
"\n", "\n",
"For background on registering your data store, consult this article:\n", "For background on registering your data store, consult this article:\n",
"\n", "\n",
"https://docs.microsoft.com/en-us/azure/data-lake-store/data-lake-store-service-to-service-authenticate-using-active-directory" "https://docs.microsoft.com/en-us/azure/data-lake-store/data-lake-store-service-to-service-authenticate-using-active-directory\n",
"\n",
"### register datastores for Azure Data Lake and Azure Blob storage"
] ]
}, },
{ {
@@ -98,8 +96,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from msrest.exceptions import HttpOperationError\n",
"\n", "\n",
"workspace = ws.name\n",
"datastore_name='MyAdlsDatastore'\n", "datastore_name='MyAdlsDatastore'\n",
"subscription_id=os.getenv(\"ADL_SUBSCRIPTION_62\", \"<my-subscription-id>\") # subscription id of ADLS account\n", "subscription_id=os.getenv(\"ADL_SUBSCRIPTION_62\", \"<my-subscription-id>\") # subscription id of ADLS account\n",
"resource_group=os.getenv(\"ADL_RESOURCE_GROUP_62\", \"<my-resource-group>\") # resource group of ADLS account\n", "resource_group=os.getenv(\"ADL_RESOURCE_GROUP_62\", \"<my-resource-group>\") # resource group of ADLS account\n",
@@ -111,7 +109,7 @@
"try:\n", "try:\n",
" adls_datastore = Datastore.get(ws, datastore_name)\n", " adls_datastore = Datastore.get(ws, datastore_name)\n",
" print(\"found datastore with name: %s\" % datastore_name)\n", " print(\"found datastore with name: %s\" % datastore_name)\n",
"except:\n", "except HttpOperationError:\n",
" adls_datastore = Datastore.register_azure_data_lake(\n", " adls_datastore = Datastore.register_azure_data_lake(\n",
" workspace=ws,\n", " workspace=ws,\n",
" datastore_name=datastore_name,\n", " datastore_name=datastore_name,\n",
@@ -133,7 +131,7 @@
"try:\n", "try:\n",
" blob_datastore = Datastore.get(ws, blob_datastore_name)\n", " blob_datastore = Datastore.get(ws, blob_datastore_name)\n",
" print(\"found blob datastore with name: %s\" % blob_datastore_name)\n", " print(\"found blob datastore with name: %s\" % blob_datastore_name)\n",
"except:\n", "except HttpOperationError:\n",
" blob_datastore = Datastore.register_azure_blob_container(\n", " blob_datastore = Datastore.register_azure_blob_container(\n",
" workspace=ws,\n", " workspace=ws,\n",
" datastore_name=blob_datastore_name,\n", " datastore_name=blob_datastore_name,\n",
@@ -150,7 +148,65 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Create DataReferences" "### register datastores for Azure SQL Server and Azure database for PostgreSQL"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"sql_datastore_name=\"MySqlDatastore\"\n",
"server_name=os.getenv(\"SQL_SERVERNAME_62\", \"<my-server-name>\") # Name of SQL server\n",
"database_name=os.getenv(\"SQL_DATBASENAME_62\", \"<my-database-name>\") # Name of SQL database\n",
"client_id=os.getenv(\"SQL_CLIENTNAME_62\", \"<my-client-id>\") # client id of service principal with permissions to access database\n",
"client_secret=os.getenv(\"SQL_CLIENTSECRET_62\", \"<my-client-secret>\") # the secret of service principal\n",
"tenant_id=os.getenv(\"SQL_TENANTID_62\", \"<my-tenant-id>\") # tenant id of service principal\n",
"\n",
"try:\n",
" sql_datastore = Datastore.get(ws, sql_datastore_name)\n",
" print(\"found sql database datastore with name: %s\" % sql_datastore_name)\n",
"except HttpOperationError:\n",
" sql_datastore = Datastore.register_azure_sql_database(\n",
" workspace=ws,\n",
" datastore_name=sql_datastore_name,\n",
" server_name=server_name,\n",
" database_name=database_name,\n",
" client_id=client_id,\n",
" client_secret=client_secret,\n",
" tenant_id=tenant_id)\n",
" print(\"registered sql databse datastore with name: %s\" % sql_datastore_name)\n",
"\n",
" \n",
"psql_datastore_name=\"MyPostgreSqlDatastore\"\n",
"server_name=os.getenv(\"PSQL_SERVERNAME_62\", \"<my-server-name>\") # Name of PostgreSQL server \n",
"database_name=os.getenv(\"PSQL_DATBASENAME_62\", \"<my-database-name>\") # Name of PostgreSQL database\n",
"user_id=os.getenv(\"PSQL_USERID_62\", \"<my-user-id>\") # user id\n",
"user_password=os.getenv(\"PSQL_USERPW_62\", \"<my-user-password>\") # user password\n",
"\n",
"try:\n",
" psql_datastore = Datastore.get(ws, psql_datastore_name)\n",
" print(\"found PostgreSQL database datastore with name: %s\" % psql_datastore_name)\n",
"except HttpOperationError:\n",
" psql_datastore = Datastore.register_azure_postgre_sql(\n",
" workspace=ws,\n",
" datastore_name=psql_datastore,\n",
" server_name=server_name,\n",
" database_name=database_name,\n",
" user_id=user_id,\n",
" user_password=user_password)\n",
" print(\"registered PostgreSQL databse datastore with name: %s\" % psql_datastore_name)\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create DataReferences\n",
"### create DataReferences for Azure Data Lake and Azure Blob storage"
] ]
}, },
{ {
@@ -178,6 +234,39 @@
"print(\"obtained adls, blob data references\")" "print(\"obtained adls, blob data references\")"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### create DataReferences for Azure SQL Server and Azure database for PostgreSQL"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data.sql_data_reference import SqlDataReference\n",
"\n",
"sql_datastore = Datastore(workspace=ws, name=\"MySqlDatastore\")\n",
"\n",
"sql_query_data_ref = SqlDataReference(\n",
" datastore=sql_datastore,\n",
" data_reference_name=\"sql_query_data_ref\",\n",
" sql_query=\"select top 1 * from TestData\")\n",
"\n",
"\n",
"psql_datastore = Datastore(workspace=ws, name=\"MyPostgreSqlDatastore\")\n",
"\n",
"psql_query_data_ref = SqlDataReference(\n",
" datastore=psql_datastore,\n",
" data_reference_name=\"psql_query_data_ref\",\n",
" sql_query=\"SELECT * FROM testtable\")\n",
"\n",
"print(\"obtained Sql server, PostgreSQL data references\")"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -255,6 +344,29 @@
"print(\"data transfer step created\")" "print(\"data transfer step created\")"
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"transfer_sql_to_blob = DataTransferStep(\n",
" name=\"transfer_sql_to_blob\",\n",
" source_data_reference=sql_query_data_ref,\n",
" destination_data_reference=blob_data_ref,\n",
" compute_target=data_factory_compute,\n",
" destination_reference_type='file')\n",
"\n",
"transfer_psql_to_blob = DataTransferStep(\n",
" name=\"transfer_psql_to_blob\",\n",
" source_data_reference=psql_query_data_ref,\n",
" destination_data_reference=blob_data_ref,\n",
" compute_target=data_factory_compute,\n",
" destination_reference_type='file')\n",
"\n",
"print(\"data transfer step created for Sql server and PostgreSQL\")"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -268,13 +380,28 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"pipeline = Pipeline(\n", "pipeline_01 = Pipeline(\n",
" description=\"data_transfer_101\",\n", " description=\"data_transfer_01\",\n",
" workspace=ws,\n", " workspace=ws,\n",
" steps=[transfer_adls_to_blob])\n", " steps=[transfer_adls_to_blob])\n",
"\n", "\n",
"pipeline_run = Experiment(ws, \"Data_Transfer_example\").submit(pipeline)\n", "pipeline_run_01 = Experiment(ws, \"Data_Transfer_example_01\").submit(pipeline_01)\n",
"pipeline_run.wait_for_completion()" "pipeline_run_01.wait_for_completion()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_02 = Pipeline(\n",
" description=\"data_transfer_02\",\n",
" workspace=ws,\n",
" steps=[transfer_sql_to_blob,transfer_psql_to_blob])\n",
"\n",
"pipeline_run_02 = Experiment(ws, \"Data_Transfer_example_02\").submit(pipeline_02)\n",
"pipeline_run_02.wait_for_completion()"
] ]
}, },
{ {
@@ -291,7 +418,17 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.widgets import RunDetails\n", "from azureml.widgets import RunDetails\n",
"RunDetails(pipeline_run).show()" "RunDetails(pipeline_run_01).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(pipeline_run_02).show()"
] ]
}, },
{ {
@@ -310,9 +447,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
@@ -324,7 +461,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.6.7" "version": "3.6.2"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@@ -16,10 +16,20 @@
"\n", "\n",
"## Overview\n", "## Overview\n",
"\n", "\n",
"Read [Azure Machine Learning Pipelines](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines) overview, or the [readme article](../README.md) on Azure Machine Learning Pipelines to get more information.\n",
" \n",
"\n", "\n",
"This Notebook shows basic construction of a **pipeline** that runs jobs unattended in different compute clusters. " "A common scenario when using machine learning components is to have a data workflow that includes the following steps:\n",
"\n",
"- Preparing/preprocessing a given dataset for training, followed by\n",
"- Training a machine learning model on this data, and then\n",
"- Deploying this trained model in a separate environment, and finally\n",
"- Running a batch scoring task on another data set, using the trained model.\n",
"\n",
"Azure's Machine Learning pipelines give you a way to combine multiple steps like these into one configurable workflow, so that multiple agents/users can share and/or reuse this workflow. Machine learning pipelines thus provide a consistent, reproducible mechanism for building, evaluating, deploying, and running ML systems.\n",
"\n",
"To get more information about Azure machine learning pipelines, please read our [Azure Machine Learning Pipelines](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines) overview, or the [readme article](../README.md).\n",
"\n",
"In this notebook, we provide a gentle introduction to Azure machine learning pipelines. We build a pipeline that runs jobs unattended on different compute clusters; in this notebook, you'll see how to use the basic Azure ML SDK APIs for constructing this pipeline.\n",
" "
] ]
}, },
{ {
@@ -45,11 +55,11 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import os\n",
"import azureml.core\n", "import azureml.core\n",
"from azureml.core import Workspace, Run, Experiment, Datastore\n", "from azureml.core import Workspace, Experiment, Datastore\n",
"from azureml.core.compute import AmlCompute\n", "from azureml.core.compute import AmlCompute\n",
"from azureml.core.compute import ComputeTarget\n", "from azureml.core.compute import ComputeTarget\n",
"from azureml.core.compute import DataFactoryCompute\n",
"from azureml.widgets import RunDetails\n", "from azureml.widgets import RunDetails\n",
"\n", "\n",
"# Check core SDK version number\n", "# Check core SDK version number\n",
@@ -71,12 +81,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.data.data_reference import DataReference\n", "from azureml.pipeline.core import Pipeline\n",
"from azureml.pipeline.core import Pipeline, PipelineData, StepSequence\n",
"from azureml.pipeline.steps import PythonScriptStep\n", "from azureml.pipeline.steps import PythonScriptStep\n",
"from azureml.pipeline.steps import DataTransferStep\n",
"from azureml.pipeline.core import PublishedPipeline\n",
"from azureml.pipeline.core.graph import PipelineParameter\n",
"\n", "\n",
"print(\"Pipeline SDK-specific imports completed\")" "print(\"Pipeline SDK-specific imports completed\")"
] ]
@@ -124,7 +130,7 @@
"# project folder\n", "# project folder\n",
"project_folder = '.'\n", "project_folder = '.'\n",
" \n", " \n",
"print('Sample projects will be created in {}.'.format(project_folder))" "print('Sample projects will be created in {}.'.format(os.path.realpath(project_folder)))"
] ]
}, },
{ {
@@ -140,7 +146,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Datastore concepts\n", "### Datastore concepts\n",
"A [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class) is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target. \n", "A [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target. \n",
"\n", "\n",
"A Datastore can either be backed by an Azure File Storage (default) or by an Azure Blob Storage.\n", "A Datastore can either be backed by an Azure File Storage (default) or by an Azure Blob Storage.\n",
"\n", "\n",
@@ -237,12 +243,13 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core.compute_target import ComputeTargetException\n",
"\n", "\n",
"aml_compute_target = \"aml-compute\"\n", "aml_compute_target = \"aml-compute\"\n",
"try:\n", "try:\n",
" aml_compute = AmlCompute(ws, aml_compute_target)\n", " aml_compute = AmlCompute(ws, aml_compute_target)\n",
" print(\"found existing compute target.\")\n", " print(\"found existing compute target.\")\n",
"except:\n", "except ComputeTargetException:\n",
" print(\"creating new compute target\")\n", " print(\"creating new compute target\")\n",
" \n", " \n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\",\n", " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\",\n",
@@ -260,9 +267,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"# For a more detailed view of current Azure Machine Learning Compute status, use the 'status' property\n", "# For a more detailed view of current Azure Machine Learning Compute status, use get_status()\n",
"# example: un-comment the following line.\n", "# example: un-comment the following line.\n",
"# print(aml_compute.status.serialize())" "# print(aml_compute.get_status().serialize())"
] ]
}, },
{ {
@@ -326,7 +333,7 @@
" script_name=\"train.py\", \n", " script_name=\"train.py\", \n",
" compute_target=aml_compute, \n", " compute_target=aml_compute, \n",
" source_directory=project_folder,\n", " source_directory=project_folder,\n",
" allow_reuse=False)\n", " allow_reuse=True)\n",
"print(\"Step1 created\")" "print(\"Step1 created\")"
] ]
}, },
@@ -379,7 +386,7 @@
"### Build the pipeline\n", "### Build the pipeline\n",
"Once we have the steps (or steps collection), we can build the [pipeline](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipeline.pipeline?view=azure-ml-py). By deafult, all these steps will run in **parallel** once we submit the pipeline for run.\n", "Once we have the steps (or steps collection), we can build the [pipeline](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipeline.pipeline?view=azure-ml-py). By deafult, all these steps will run in **parallel** once we submit the pipeline for run.\n",
"\n", "\n",
"A pipeline is created with a list of steps and a workspace. Submit a pipeline using [submit](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.experiment%28class%29?view=azure-ml-py#submit). When submit is called, a [PipelineRun](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelinerun?view=azure-ml-py) is created which in turn creates [StepRun](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.steprun?view=azure-ml-py) objects for each step in the workflow." "A pipeline is created with a list of steps and a workspace. Submit a pipeline using [submit](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.experiment(class)?view=azure-ml-py#submit-config--tags-none----kwargs-). When submit is called, a [PipelineRun](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelinerun?view=azure-ml-py) is created which in turn creates [StepRun](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.steprun?view=azure-ml-py) objects for each step in the workflow."
] ]
}, },
{ {
@@ -407,7 +414,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Validate the pipeline\n", "### Validate the pipeline\n",
"You have the option to [validate](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipeline.pipeline?view=azure-ml-py#validate) the pipeline prior to submitting for run. The platform runs validation steps such as checking for circular dependencies and parameter checks etc. even if you do not explicitly call validate method." "You have the option to [validate](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipeline.pipeline?view=azure-ml-py#validate--) the pipeline prior to submitting for run. The platform runs validation steps such as checking for circular dependencies and parameter checks etc. even if you do not explicitly call validate method."
] ]
}, },
{ {
@@ -440,7 +447,7 @@
"# continue_on_node_failure=False, \n", "# continue_on_node_failure=False, \n",
"# regenerate_outputs=False)\n", "# regenerate_outputs=False)\n",
"\n", "\n",
"pipeline_run1 = Experiment(ws, 'Hello_World1').submit(pipeline1, regenerate_outputs=True)\n", "pipeline_run1 = Experiment(ws, 'Hello_World1').submit(pipeline1, regenerate_outputs=False)\n",
"print(\"Pipeline is submitted for execution\")" "print(\"Pipeline is submitted for execution\")"
] ]
}, },
@@ -521,7 +528,7 @@
"## Running a few steps in sequence\n", "## Running a few steps in sequence\n",
"Now let's see how we run a few steps in sequence. We already have three steps defined earlier. Let's *reuse* those steps for this part.\n", "Now let's see how we run a few steps in sequence. We already have three steps defined earlier. Let's *reuse* those steps for this part.\n",
"\n", "\n",
"We will reuse step1, step2, step3, but build the pipeline in such a way that we chain step3 after step2 and step2 after step1. Note that there is no explicit data dependency between these steps, but still steps can be made dependent by using the [run_after](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.builder.pipelinestep?view=azure-ml-py#run-after) construct." "We will reuse step1, step2, step3, but build the pipeline in such a way that we chain step3 after step2 and step2 after step1. Note that there is no explicit data dependency between these steps, but still steps can be made dependent by using the [run_after](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.builder.pipelinestep?view=azure-ml-py#run-after-step-) construct."
] ]
}, },
{ {
@@ -584,9 +591,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@@ -0,0 +1,359 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Azure Machine Learning Pipeline with AzureBatchStep"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook is used to demonstrate the use of AzureBatchStep in Azure Machine Learning Pipeline.\n",
"An AzureBatchStep will submit a job to an AzureBatch Compute to run a simple windows executable."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Azure Machine Learning and Pipeline SDK-specific Imports"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"from azureml.core import Workspace, Experiment\n",
"from azureml.core.compute import ComputeTarget, BatchCompute\n",
"from azureml.core.datastore import Datastore\n",
"from azureml.data.data_reference import DataReference\n",
"from azureml.exceptions import ComputeTargetException\n",
"from azureml.pipeline.core import Pipeline, PipelineData\n",
"from azureml.pipeline.steps import AzureBatchStep\n",
"\n",
"import os\n",
"from os import path\n",
"from tempfile import mkdtemp\n",
"\n",
"\n",
"# Check core SDK version number\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize Workspace"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Initialize a workspace object from persisted configuration. Make sure the config file is present at .\\config.json\n",
"\n",
"If you don't have a config.json file, please go through the configuration Notebook located here:\n",
"https://github.com/Azure/MachineLearningNotebooks. \n",
"\n",
"This sets you up with a working config file that has information on your workspace, subscription id, etc. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Attach Batch Compute to Workspace"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To submit jobs to Azure Batch service, you must attach your Azure Batch account to the workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"batch_compute_name = 'mybatchcompute' # Name to associate with new compute in workspace\n",
"\n",
"# Batch account details needed to attach as compute to workspace\n",
"batch_account_name = \"<batch_account_name>\" # Name of the Batch account\n",
"batch_resource_group = \"<batch_resource_group>\" # Name of the resource group which contains this account\n",
"\n",
"try:\n",
" # check if already attached\n",
" batch_compute = BatchCompute(ws, batch_compute_name)\n",
"except ComputeTargetException:\n",
" print('Attaching Batch compute...')\n",
" provisioning_config = BatchCompute.attach_configuration(resource_group=batch_resource_group, account_name=batch_account_name)\n",
" batch_compute = ComputeTarget.attach(ws, batch_compute_name, provisioning_config)\n",
" batch_compute.wait_for_completion()\n",
" print(\"Provisioning state:{}\".format(batch_compute.provisioning_state))\n",
" print(\"Provisioning errors:{}\".format(batch_compute.provisioning_errors))\n",
"\n",
"print(\"Using Batch compute:{}\".format(batch_compute.cluster_resource_id))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup DataStore"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Blob storage associated with the workspace\n",
"# The following call GETS the Azure Blob Store associated with your workspace.\n",
"# Note that workspaceblobstore is **the name of this store and CANNOT BE CHANGED and must be used as is** \n",
"default_blob_store = Datastore(ws, \"workspaceblobstore\")\n",
"print(\"Blobstore name: {}\".format(def_blob_store.name))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup Input and Output"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For this example we will upload a file in the provided DataStore. These are some helper methods to achieve that."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def create_local_file(content, file_name):\n",
" # create a file in a local temporary directory\n",
" temp_dir = mkdtemp()\n",
" with open(path.join(temp_dir, file_name), 'w') as f:\n",
" f.write(content)\n",
" return temp_dir\n",
"\n",
"\n",
"def upload_file_to_datastore(datastore, path, content):\n",
" dir = create_local_file(content=content, file_name=\"temp.file\")\n",
" datastore.upload(src_dir=dir, target_path=path, overwrite=True, show_progress=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here we associate the input DataReference with an existing file in the provided DataStore. Feel free to upload the file of your choice manually or use the *upload_testdata* method. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"testdata_path=\"testdata.txt\"\n",
"\n",
"upload_file_to_datastore(datastore=default_blob_store, \n",
" path=testdata_path, \n",
" content=\"This is the content of the file\")\n",
"\n",
"testdata = DataReference(datastore=default_blob_store, \n",
" path_on_datastore=testdata_path, \n",
" data_reference_name=\"input\")\n",
"\n",
"outputdata = PipelineData(name=\"output\", datastore=datastore)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup AzureBatch Job Binaries"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"AzureBatch can run a task within the job and here we put a simple .cmd file to be executed. Feel free to put any binaries in the folder, or modify the .cmd file as needed, they will be uploaded once we create the AzureBatch Step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"binaries_folder = \"azurebatch/job_binaries\"\n",
"if not os.path.isdir(binaries_folder):\n",
" os.mkdir(project_folder)\n",
"\n",
"file_name=\"azurebatch.cmd\"\n",
"with open(path.join(binaries_folder, file_name), 'w') as f:\n",
" f.write(\"copy \\\"%1\\\" \\\"%2\\\"\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create an AzureBatchStep"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"AzureBatchStep is used to submit a job to the attached Azure Batch compute.\n",
"- **name:** Name of the step\n",
"- **pool_id:** Name of the pool, it can be an existing pool, or one that will be created when the job is submitted\n",
"- **inputs:** List of inputs that will be processed by the job\n",
"- **outputs:** List of outputs the job will create\n",
"- **executable:** The executable that will run as part of the job\n",
"- **arguments:** Arguments for the executable. They can be plain string format, inputs, outputs or parameters\n",
"- **compute_target:** The compute target where the job will run.\n",
"- **source_directory:** The local directory with binaries to be executed by the job\n",
"\n",
"Optional parameters:\n",
"\n",
"- **create_pool:** Boolean flag to indicate whether create the pool before running the jobs\n",
"- **delete_batch_job_after_finish:** Boolean flag to indicate whether to delete the job from Batch account after it's finished\n",
"- **delete_batch_pool_after_finish:** Boolean flag to indicate whether to delete the pool after the job finishes\n",
"- **is_positive_exit_code_failure:** Boolean flag to indicate if the job fails if the task exists with a positive code\n",
"- **vm_image_urn:** If create_pool is true and VM uses VirtualMachineConfiguration. \n",
" Value format: 'urn:publisher:offer:sku'. \n",
" Example: urn:MicrosoftWindowsServer:WindowsServer:2012-R2-Datacenter \n",
" For more details: \n",
" https://docs.microsoft.com/en-us/azure/virtual-machines/windows/cli-ps-findimage#table-of-commonly-used-windows-images and \n",
" https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-ps-findimage#find-specific-images\n",
"- **run_task_as_admin:** Boolean flag to indicate if the task should run with Admin privileges\n",
"- **target_compute_nodes:** Assumes create_pool is true, indicates how many compute nodes will be added to the pool\n",
"- **source_directory:** Local folder that contains the module binaries, executable, assemblies etc.\n",
"- **executable:** Name of the command/executable that will be executed as part of the job\n",
"- **arguments:** Arguments for the command/executable\n",
"- **inputs:** List of input port bindings\n",
"- **outputs:** List of output port bindings\n",
"- **vm_size:** If create_pool is true, indicating Virtual machine size of the compute nodes\n",
"- **compute_target:** BatchCompute compute\n",
"- **allow_reuse:** Whether the module should reuse previous results when run with the same settings/inputs\n",
"- **version:** A version tag to denote a change in functionality for the module"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"step = AzureBatchStep(\n",
" name=\"Azure Batch Job\",\n",
" pool_id=\"MyPoolName\", # Replace this with the pool name of your choice\n",
" inputs=[testdata],\n",
" outputs=[outputdata],\n",
" executable=\"azurebatch.cmd\",\n",
" arguments=[testdata, outputdata],\n",
" compute_target=batch_compute,\n",
" source_directory=binaries_folder,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Build and Submit the Pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline = Pipeline(workspace=ws, steps=[step])\n",
"pipeline_run = Experiment(ws, 'azurebatch_experiment').submit(pipeline)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Visualize the Running Pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(pipeline_run).show()"
]
}
],
"metadata": {
"authors": [
{
"name": "diray"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
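
The step above targets an existing pool. Using the optional parameters listed earlier, a variant that asks the service to create (and afterwards delete) its own pool might look like this sketch; the pool name, node count, VM size and image URN are illustrative values, not tested settings.

```python
# same job, but provisioning a transient Windows pool for it (illustrative values)
step_with_pool = AzureBatchStep(
    name="Azure Batch Job (auto pool)",
    pool_id="MyAutoPool",
    create_pool=True,
    vm_size="standard_d2_v2",
    target_compute_nodes=2,
    vm_image_urn="urn:MicrosoftWindowsServer:WindowsServer:2012-R2-Datacenter",
    delete_batch_pool_after_finish=True,
    inputs=[testdata],
    outputs=[outputdata],
    executable="azurebatch.cmd",
    arguments=[testdata, outputdata],
    compute_target=batch_compute,
    source_directory=binaries_folder)
```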

View File

@@ -0,0 +1,396 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Azure Machine Learning Pipeline with HyperDriveStep\n",
"\n",
"\n",
"This notebook is used to demonstrate the use of HyperDriveStep in AML Pipeline.\n",
"\n",
"## Azure Machine Learning and Pipeline SDK-specific imports\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import shutil\n",
"import urllib\n",
"from azureml.core import Experiment\n",
"from azureml.core.datastore import Datastore\n",
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.exceptions import ComputeTargetException\n",
"from azureml.data.data_reference import DataReference\n",
"from azureml.pipeline.steps import HyperDriveStep\n",
"from azureml.pipeline.core import Pipeline\n",
"from azureml.train.dnn import TensorFlow\n",
"from azureml.train.hyperdrive import *\n",
"\n",
"# Check core SDK version number\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize workspace\n",
"\n",
"Initialize a workspace object from persisted configuration. Make sure the config file is present at .\\config.json"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create an Azure ML experiment\n",
"Let's create an experiment named \"tf-mnist\" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"script_folder = './tf-mnist'\n",
"os.makedirs(script_folder, exist_ok=True)\n",
"\n",
"exp = Experiment(workspace=ws, name='tf-mnist')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Download MNIST dataset\n",
"In order to train on the MNIST dataset we will first need to download it from Yan LeCun's web site directly and save them in a `data` folder locally."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"os.makedirs('./data/mnist', exist_ok=True)\n",
"\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename = './data/mnist/train-images.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename = './data/mnist/train-labels.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename = './data/mnist/test-images.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename = './data/mnist/test-labels.gz')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Upload MNIST dataset to blob datastore \n",
"A [datastore](https://docs.microsoft.com/azure/machine-learning/service/how-to-access-data) is a place where data can be stored that is then made accessible to a Run either by means of mounting or copying the data to the compute target. A datastore can either be backed by an Azure Blob Storage or and Azure File Share (ADLS will be supported in the future). In the next step, we will use Azure Blob Storage and upload the training and test set into the Azure Blob datastore, which we will then later be mount on a Batch AI cluster for training."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ds = Datastore(workspace=ws, name=\"MyBlobDatastore\")\n",
"ds.upload(src_dir='./data/mnist', target_path='mnist', overwrite=True, show_progress=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieve or create a Azure Machine Learning compute\n",
"Azure Machine Learning Compute is a service for provisioning and managing clusters of Azure virtual machines for running machine learning workloads. Let's create a new Azure Machine Learning Compute in the current workspace, if it doesn't already exist. We will then run the training script on this compute target.\n",
"\n",
"If we could not find the compute with the given name in the previous cell, then we will create a new compute here. We will create an Azure Machine Learning Compute containing **STANDARD_D2_V2 CPU VMs**. This process is broken down into the following steps:\n",
"\n",
"1. Create the configuration\n",
"2. Create the Azure Machine Learning compute\n",
"\n",
"**This process will take about 3 minutes and is providing only sparse output in the process. Please make sure to wait until the call returns before moving to the next cell.**\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cluster_name = \"aml-compute\"\n",
"\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n",
" print('Found existing compute target {}.'.format(cluster_name))\n",
"except ComputeTargetException:\n",
" print('Creating a new compute target...')\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_NC6\",\n",
" max_nodes=4)\n",
"\n",
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
" compute_target.wait_for_completion(show_output=True, timeout_in_minutes=20)\n",
"\n",
"print(\"Azure Machine Learning Compute attached\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Copy the training files into the script folder\n",
"The TensorFlow training script is already created for you. You can simply copy it into the script folder, together with the utility library used to load compressed data file into numpy array."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# the training logic is in the tf_mnist.py file.\n",
"shutil.copy('./tf_mnist.py', script_folder)\n",
"\n",
"# the utils.py just helps loading data from the downloaded MNIST dataset into numpy arrays.\n",
"shutil.copy('./utils.py', script_folder)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create TensorFlow estimator\n",
"Next, we construct an `azureml.train.dnn.TensorFlow` estimator object, use the Batch AI cluster as compute target, and pass the mount-point of the datastore to the training code as a parameter.\n",
"The TensorFlow estimator is providing a simple way of launching a TensorFlow training job on a compute target. It will automatically provide a docker image that has TensorFlow installed -- if additional pip or conda packages are required, their names can be passed in via the `pip_packages` and `conda_packages` arguments and they will be included in the resulting docker."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"est = TensorFlow(source_directory=script_folder, \n",
" compute_target=compute_target,\n",
" entry_script='tf_mnist.py', \n",
" use_gpu=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Intelligent hyperparameter tuning\n",
"We have trained the model with one set of hyperparameters, now let's how we can do hyperparameter tuning by launching multiple runs on the cluster. First let's define the parameter space using random sampling.\n",
"\n",
"In this example we will use random sampling to try different configuration sets of hyperparameters to maximize our primary metric, the best validation accuracy (`validation_acc`)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ps = RandomParameterSampling(\n",
" {\n",
" '--batch-size': choice(25, 50, 100),\n",
" '--first-layer-neurons': choice(10, 50, 200, 300, 500),\n",
" '--second-layer-neurons': choice(10, 50, 200, 500),\n",
" '--learning-rate': loguniform(-6, -1)\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we will define an early termnination policy. The `BanditPolicy` basically states to check the job every 2 iterations. If the primary metric (defined later) falls outside of the top 10% range, Azure ML terminate the job. This saves us from continuing to explore hyperparameters that don't show promise of helping reach our target metric.\n",
"\n",
"Refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-tune-hyperparameters#specify-an-early-termination-policy) for more information on the BanditPolicy and other policies available."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"early_termination_policy = BanditPolicy(evaluation_interval=2, slack_factor=0.1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we are ready to configure a run configuration object, and specify the primary metric `validation_acc` that's recorded in your training runs. If you go back to visit the training script, you will notice that this value is being logged after every epoch (a full batch set). We also want to tell the service that we are looking to maximizing this value. We also set the number of samples to 20, and maximal concurrent job to 4, which is the same as the number of nodes in our computer cluster."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"hd_config = HyperDriveRunConfig(estimator=est, \n",
" hyperparameter_sampling=ps,\n",
" policy=early_termination_policy,\n",
" primary_metric_name='validation_acc', \n",
" primary_metric_goal=PrimaryMetricGoal.MAXIMIZE, \n",
" max_total_runs=1,\n",
" max_concurrent_runs=1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Add HyperDrive as a step of pipeline\n",
"\n",
"Let's setup a data reference for inputs of hyperdrive step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data_folder = DataReference(\n",
" datastore=ds,\n",
" data_reference_name=\"mnist_data\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### HyperDriveStep\n",
"HyperDriveStep can be used to run HyperDrive job as a step in pipeline.\n",
"- **name:** Name of the step\n",
"- **hyperdrive_run_config:** A HyperDriveRunConfig that defines the configuration for this HyperDrive run\n",
"- **estimator_entry_script_arguments:** List of command-line arguments for estimator entry script\n",
"- **inputs:** List of input port bindings\n",
"- **outputs:** List of output port bindings\n",
"- **metrics_output:** Optional value specifying the location to store HyperDrive run metrics as a JSON file\n",
"- **allow_reuse:** whether to allow reuse\n",
"- **version:** version\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"hd_step = HyperDriveStep(\n",
" name=\"hyperdrive_module\",\n",
" hyperdrive_run_config=hd_config,\n",
" estimator_entry_script_arguments=['--data-folder', data_folder],\n",
" inputs=[data_folder])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Build the experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline = Pipeline(workspace=ws, steps=[hd_step])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit the experiment "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run = Experiment(ws, 'Hyperdrive_Test').submit(pipeline)\n",
"pipeline_run.wait_for_completion()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### View Run Details"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(pipeline_run).show()"
]
}
],
"metadata": {
"authors": [
{
"name": "sonnyp"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -33,20 +33,16 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import azureml.core\n", "import azureml.core\n",
"from azureml.core import Workspace, Run, Experiment, Datastore\n", "from azureml.core import Workspace, Datastore\n",
"from azureml.core.compute import AmlCompute\n", "from azureml.core.compute import AmlCompute\n",
"from azureml.core.compute import ComputeTarget\n", "from azureml.core.compute import ComputeTarget\n",
"from azureml.core.compute import DataFactoryCompute\n",
"from azureml.widgets import RunDetails\n",
"\n", "\n",
"# Check core SDK version number\n", "# Check core SDK version number\n",
"print(\"SDK version:\", azureml.core.VERSION)\n", "print(\"SDK version:\", azureml.core.VERSION)\n",
"\n", "\n",
"from azureml.data.data_reference import DataReference\n", "from azureml.data.data_reference import DataReference\n",
"from azureml.pipeline.core import Pipeline, PipelineData, StepSequence\n", "from azureml.pipeline.core import Pipeline, PipelineData\n",
"from azureml.pipeline.steps import PythonScriptStep\n", "from azureml.pipeline.steps import PythonScriptStep\n",
"from azureml.pipeline.steps import DataTransferStep\n",
"from azureml.pipeline.core import PublishedPipeline\n",
"from azureml.pipeline.core.graph import PipelineParameter\n", "from azureml.pipeline.core.graph import PipelineParameter\n",
"\n", "\n",
"print(\"Pipeline SDK-specific imports completed\")\n", "print(\"Pipeline SDK-specific imports completed\")\n",
@@ -79,12 +75,13 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core.compute_target import ComputeTargetException\n",
"\n", "\n",
"aml_compute_target = \"aml-compute\"\n", "aml_compute_target = \"aml-compute\"\n",
"try:\n", "try:\n",
" aml_compute = AmlCompute(ws, aml_compute_target)\n", " aml_compute = AmlCompute(ws, aml_compute_target)\n",
" print(\"found existing compute target.\")\n", " print(\"found existing compute target.\")\n",
"except:\n", "except ComputeTargetException:\n",
" print(\"creating new compute target\")\n", " print(\"creating new compute target\")\n",
" \n", " \n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\",\n", " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\",\n",
@@ -100,9 +97,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"# For a more detailed view of current Azure Machine Learning Compute status, use the 'status' property\n", "# For a more detailed view of current Azure Machine Learning Compute status, use get_status()\n",
"# example: un-comment the following line.\n", "# example: un-comment the following line.\n",
"# print(aml_compute.status.serialize())" "# print(aml_compute.get_status().serialize())"
] ]
}, },
{ {
@@ -309,11 +306,11 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core.authentication import AzureCliAuthentication\n", "from azureml.core.authentication import InteractiveLoginAuthentication\n",
"import requests\n", "import requests\n",
"\n", "\n",
"cli_auth = AzureCliAuthentication()\n", "auth = InteractiveLoginAuthentication()\n",
"aad_token = cli_auth.get_authentication_header()\n", "aad_token = auth.get_authentication_header()\n",
"\n", "\n",
"rest_endpoint1 = published_pipeline1.endpoint\n", "rest_endpoint1 = published_pipeline1.endpoint\n",
"\n", "\n",
@@ -346,9 +343,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@@ -0,0 +1,404 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to Setup a Schedule for a Published Pipeline\n",
"In this notebook, we will show you how you can run an already published pipeline on a schedule."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prerequisites and AML Basics\n",
"Make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc.\n",
"\n",
"### Initialization Steps"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"from azureml.core import Workspace\n",
"\n",
"# Check core SDK version number\n",
"print(\"SDK version:\", azureml.core.VERSION)\n",
"\n",
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Compute Targets\n",
"#### Retrieve an already attached Azure Machine Learning Compute"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Run, Experiment, Datastore\n",
"\n",
"from azureml.widgets import RunDetails\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"aml_compute_target = \"aml-compute\"\n",
"try:\n",
" aml_compute = AmlCompute(ws, aml_compute_target)\n",
" print(\"Found existing compute target: {}\".format(aml_compute_target))\n",
"except:\n",
" print(\"Creating new compute target: {}\".format(aml_compute_target))\n",
" \n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\",\n",
" min_nodes = 1, \n",
" max_nodes = 4) \n",
" aml_compute = ComputeTarget.create(ws, aml_compute_target, provisioning_config)\n",
" aml_compute.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Build and Publish Pipeline\n",
"Build a simple pipeline, publish it and add a schedule to run it."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Define a pipeline step\n",
"Define a single step pipeline for demonstration purpose."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.steps import PythonScriptStep\n",
"\n",
"\n",
"# project folder\n",
"project_folder = 'scripts'\n",
"\n",
"trainStep = PythonScriptStep(\n",
" name=\"Training_Step\",\n",
" script_name=\"train.py\", \n",
" compute_target=aml_compute_target, \n",
" source_directory=project_folder\n",
")\n",
"print(\"TrainStep created\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Build the pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"pipeline1 = Pipeline(workspace=ws, steps=[trainStep])\n",
"print (\"Pipeline is built\")\n",
"\n",
"pipeline1.validate()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Publish the pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from datetime import datetime\n",
"\n",
"timenow = datetime.now().strftime('%m-%d-%Y-%H-%M')\n",
"\n",
"pipeline_name = timenow + \"-Pipeline\"\n",
"print(pipeline_name)\n",
"\n",
"published_pipeline1 = pipeline1.publish(\n",
" name=pipeline_name, \n",
" description=pipeline_name)\n",
"print(\"Newly published pipeline id: {}\".format(published_pipeline1.id))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Schedule Operations\n",
"Schedule operations require id of a published pipeline. You can get all published pipelines and do Schedule operations on them, or if you already know the id of the published pipeline, you can use it directly as well.\n",
"### Get published pipeline ID"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import PublishedPipeline\n",
"\n",
"# You could retrieve all pipelines that are published, or \n",
"# just get the published pipeline object that you have the ID for.\n",
"\n",
"# Get all published pipeline objects in the workspace\n",
"all_pub_pipelines = PublishedPipeline.get_all(ws)\n",
"\n",
"# We will iterate through the list of published pipelines and \n",
"# use the last ID in the list for Schelue operations: \n",
"print(\"Published pipelines found in the workspace:\")\n",
"for pub_pipeline in all_pub_pipelines:\n",
" print(pub_pipeline.id)\n",
" pub_pipeline_id = pub_pipeline.id\n",
"\n",
"print(\"Published pipeline id to be used for Schedule operations: {}\".format(pub_pipeline_id))"
]
},
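{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you already know the ID of a published pipeline, a minimal sketch for fetching it directly (this assumes `PublishedPipeline.get` accepts the workspace and pipeline ID in your SDK version):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch only: fetch a published pipeline directly by its ID\n",
"# (assumes PublishedPipeline.get(ws, id=...) is available in your SDK version)\n",
"pub_pipeline = PublishedPipeline.get(ws, id=pub_pipeline_id)\n",
"print(pub_pipeline.name, pub_pipeline.id)"
]
},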
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a schedule for the pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core.schedule import ScheduleRecurrence, Schedule\n",
"\n",
"recurrence = ScheduleRecurrence(frequency=\"Day\", interval=2, hours=[22], minutes=[30]) # Runs every other day at 10:30pm\n",
"\n",
"schedule = Schedule.create(workspace=ws, name=\"My_Schedule\",\n",
" pipeline_id=pub_pipeline_id, \n",
" experiment_name='Schedule_Run',\n",
" recurrence=recurrence,\n",
" wait_for_provisioning=True,\n",
" description=\"Schedule Run\")\n",
"\n",
"# You may want to make sure that the schedule is provisioned properly\n",
"# before making any further changes to the schedule\n",
"\n",
"print(\"Created schedule with id: {}\".format(schedule.id))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note: Set the `wait_for_provisioning` flag to False if you do not want to wait for the call to provision the schedule in the backend."
]
},
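{
"cell_type": "markdown",
"metadata": {},
"source": [
"`ScheduleRecurrence` also supports other frequencies. The next cell is a minimal sketch of a weekly recurrence; it only builds the recurrence object (it does not create another schedule) and assumes the `week_days` parameter is available in your SDK version."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch only: a weekly recurrence that would run every Friday at 3:30 PM.\n",
"# Assumes the week_days parameter is supported in your SDK version.\n",
"weekly_recurrence = ScheduleRecurrence(frequency=\"Week\", interval=1,\n",
"                                       week_days=[\"Friday\"], hours=[15], minutes=[30])\n",
"print(weekly_recurrence.frequency, weekly_recurrence.interval)"
]
},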
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get all schedules for a given pipeline\n",
"Once you have the published pipeline ID, then you can get all schedules for that pipeline."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"schedules = Schedule.get_all(ws, pipeline_id=pub_pipeline_id)\n",
"\n",
"# We will iterate through the list of schedules and \n",
"# use the last ID in the list for further operations: \n",
"print(\"Found these schedules for the pipeline id {}:\".format(pub_pipeline_id))\n",
"for schedule in schedules: \n",
" print(schedule.id)\n",
" schedule_id = schedule.id\n",
"\n",
"print(\"Schedule id to be used for schedule operations: {}\".format(schedule_id))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get all schedules in your workspace\n",
"You can also iterate through all schedules in your workspace if needed."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Use active_only=False to get all schedules including disabled schedules\n",
"schedules = Schedule.get_all(ws, active_only=True) \n",
"print(\"Your workspace has the following schedules set up:\")\n",
"for schedule in schedules:\n",
" print(\"{} (Published pipeline: {}\".format(schedule.id, schedule.pipeline_id))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get the schedule"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fetched_schedule = Schedule.get(ws, schedule_id)\n",
"print(\"Using schedule with id: {}\".format(fetched_schedule.id))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Disable the schedule"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the wait_for_provisioning flag to False if you do not want to wait \n",
"# for the call to provision the schedule in the backend.\n",
"fetched_schedule.disable(wait_for_provisioning=True)\n",
"fetched_schedule = Schedule.get(ws, schedule_id)\n",
"print(\"Disabled schedule {}. New status is: {}\".format(fetched_schedule.id, fetched_schedule.status))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Reactivate the schedule"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the wait_for_provisioning flag to False if you do not want to wait \n",
"# for the call to provision the schedule in the backend.\n",
"fetched_schedule.activate(wait_for_provisioning=True)\n",
"fetched_schedule = Schedule.get(ws, schedule_id)\n",
"print(\"Activated schedule {}. New status is: {}\".format(fetched_schedule.id, fetched_schedule.status))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Change reccurence of the schedule"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the wait_for_provisioning flag to False if you do not want to wait \n",
"# for the call to provision the schedule in the backend.\n",
"recurrence = ScheduleRecurrence(frequency=\"Hour\", interval=2) # Runs every two hours\n",
"\n",
"fetched_schedule = Schedule.get(ws, schedule_id)\n",
"\n",
"fetched_schedule.update(name=\"My_Updated_Schedule\", \n",
" description=\"Updated_Schedule_Run\", \n",
" status='Active', \n",
" wait_for_provisioning=True,\n",
" recurrence=recurrence)\n",
"\n",
"fetched_schedule = Schedule.get_schedule(ws, fetched_schedule.id)\n",
"\n",
"print(\"Updated schedule:\", fetched_schedule.id, \n",
" \"\\nNew name:\", fetched_schedule.name,\n",
" \"\\nNew frequency:\", fetched_schedule.recurrence.frequency,\n",
" \"\\nNew status:\", fetched_schedule.status)"
]
}
],
"metadata": {
"authors": [
{
"name": "diray"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -13,7 +13,8 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"# AML Pipeline with AdlaStep\n", "# AML Pipeline with AdlaStep\n",
"This notebook is used to demonstrate the use of AdlaStep in AML Pipeline." "\n",
"This notebook is used to demonstrate the use of AdlaStep in AML Pipelines. [AdlaStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.adla_step.adlastep?view=azure-ml-py) is used to run U-SQL scripts using Azure Data Lake Analytics service. "
] ]
}, },
{ {
@@ -30,15 +31,16 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import os\n", "import os\n",
"from msrest.exceptions import HttpOperationError\n",
"\n",
"import azureml.core\n", "import azureml.core\n",
"from azureml.core.compute import ComputeTarget, DatabricksCompute\n",
"from azureml.exceptions import ComputeTargetException\n", "from azureml.exceptions import ComputeTargetException\n",
"from azureml.core import Workspace, Run, Experiment\n", "from azureml.core import Workspace, Experiment\n",
"from azureml.pipeline.core import Pipeline, PipelineData\n", "from azureml.core.compute import ComputeTarget, AdlaCompute\n",
"from azureml.pipeline.steps import AdlaStep\n",
"from azureml.core.datastore import Datastore\n", "from azureml.core.datastore import Datastore\n",
"from azureml.data.data_reference import DataReference\n", "from azureml.data.data_reference import DataReference\n",
"from azureml.core import attach_legacy_compute_target\n", "from azureml.pipeline.core import Pipeline, PipelineData\n",
"from azureml.pipeline.steps import AdlaStep\n",
"\n", "\n",
"# Check core SDK version number\n", "# Check core SDK version number\n",
"print(\"SDK version:\", azureml.core.VERSION)" "print(\"SDK version:\", azureml.core.VERSION)"
@@ -67,22 +69,57 @@
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')" "print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Attach ADLA account to workspace\n",
"\n",
"To submit jobs to Azure Data Lake Analytics service, you must first attach your ADLA account to the workspace. You'll need to provide the account name and resource group of ADLA account to complete this part."
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"script_folder = '.'\n", "adla_compute_name = 'testadl' # Name to associate with new compute in workspace\n",
"experiment_name = \"adla_101_experiment\"\n", "\n",
"ws._initialize_folder(experiment_name=experiment_name, directory=script_folder)" "# ADLA account details needed to attach as compute to workspace\n",
"adla_account_name = \"<adla_account_name>\" # Name of the Azure Data Lake Analytics account\n",
"adla_resource_group = \"<adla_resource_group>\" # Name of the resource group which contains this account\n",
"\n",
"try:\n",
" # check if already attached\n",
" adla_compute = AdlaCompute(ws, adla_compute_name)\n",
"except ComputeTargetException:\n",
" print('attaching adla compute...')\n",
" attach_config = AdlaCompute.attach_configuration(resource_group=adla_resource_group, account_name=adla_account_name)\n",
" adla_compute = ComputeTarget.attach(ws, adla_compute_name, attach_config)\n",
" adla_compute.wait_for_completion()\n",
"\n",
"print(\"Using ADLA compute:{}\".format(adla_compute.cluster_resource_id))\n",
"print(\"Provisioning state:{}\".format(adla_compute.provisioning_state))\n",
"print(\"Provisioning errors:{}\".format(adla_compute.provisioning_errors))"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Register Datastore" "## Register Data Lake Storage as Datastore\n",
"\n",
"To register Data Lake Storage as Datastore in workspace, you'll need account information like account name, resource group and subscription Id. \n",
"\n",
"> AdlaStep can only work with data stored in the **default** Data Lake Storage of the Data Lake Analytics account provided above. If the data you need to work with is in a non-default storage, you can use a DataTransferStep to copy the data before training. You can find the default storage by opening your Data Lake Analytics account in Azure portal and then navigating to 'Data sources' item under Settings in the left pane.\n",
"\n",
"### Grant Azure AD application access to Data Lake Storage\n",
"\n",
"You'll also need to provide an Active Directory application which can access Data Lake Storage. [This document](https://docs.microsoft.com/en-us/azure/data-lake-store/data-lake-store-service-to-service-authenticate-using-active-directory) contains step-by-step instructions on how to create an AAD application and assign to Data Lake Storage. Couple of important notes when assigning permissions to AAD app:\n",
"\n",
"- Access should be provided at root folder level.\n",
"- In 'Assign permissions' pane, select Read, Write, and Execute permissions for 'This folder and all children'. Add as 'An access permission entry and a default permission entry' to make sure application can access any new files created in the future."
] ]
}, },
{ {
@@ -91,20 +128,20 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"datastore_name = 'MyAdlsDatastore' # Name to associate with data store in workspace\n",
"\n", "\n",
"workspace = ws.name\n", "# ADLS storage account details needed to register as a Datastore\n",
"datastore_name='MyAdlsDatastore'\n", "subscription_id = os.getenv(\"ADL_SUBSCRIPTION_62\", \"<my-subscription-id>\") # subscription id of ADLS account\n",
"subscription_id=os.getenv(\"ADL_SUBSCRIPTION_62\", \"<my-subscription-id>\") # subscription id of ADLS account\n", "resource_group = os.getenv(\"ADL_RESOURCE_GROUP_62\", \"<my-resource-group>\") # resource group of ADLS account\n",
"resource_group=os.getenv(\"ADL_RESOURCE_GROUP_62\", \"<my-resource-group>\") # resource group of ADLS account\n", "store_name = os.getenv(\"ADL_STORENAME_62\", \"<my-datastore-name>\") # ADLS account name\n",
"store_name=os.getenv(\"ADL_STORENAME_62\", \"<my-datastore-name>\") # ADLS account name\n", "tenant_id = os.getenv(\"ADL_TENANT_62\", \"<my-tenant-id>\") # tenant id of service principal\n",
"tenant_id=os.getenv(\"ADL_TENANT_62\", \"<my-tenant-id>\") # tenant id of service principal\n", "client_id = os.getenv(\"ADL_CLIENTID_62\", \"<my-client-id>\") # client id of service principal\n",
"client_id=os.getenv(\"ADL_CLIENTID_62\", \"<my-client-id>\") # client id of service principal\n", "client_secret = os.getenv(\"ADL_CLIENT_62_SECRET\", \"<my-client-secret>\") # the secret of service principal\n",
"client_secret=os.getenv(\"ADL_CLIENT_62_SECRET\", \"<my-client-secret>\") # the secret of service principal\n",
"\n", "\n",
"try:\n", "try:\n",
" adls_datastore = Datastore.get(ws, datastore_name)\n", " adls_datastore = Datastore.get(ws, datastore_name)\n",
" print(\"found datastore with name: %s\" % datastore_name)\n", " print(\"found datastore with name: %s\" % datastore_name)\n",
"except:\n", "except HttpOperationError:\n",
" adls_datastore = Datastore.register_azure_data_lake(\n", " adls_datastore = Datastore.register_azure_data_lake(\n",
" workspace=ws,\n", " workspace=ws,\n",
" datastore_name=datastore_name,\n", " datastore_name=datastore_name,\n",
@@ -114,16 +151,16 @@
" tenant_id=tenant_id, # tenant id of service principal\n", " tenant_id=tenant_id, # tenant id of service principal\n",
" client_id=client_id, # client id of service principal\n", " client_id=client_id, # client id of service principal\n",
" client_secret=client_secret) # the secret of service principal\n", " client_secret=client_secret) # the secret of service principal\n",
" print(\"registered datastore with name: %s\" % datastore_name)\n" " print(\"registered datastore with name: %s\" % datastore_name)"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Create DataReferences and PipelineData\n", "## Setup inputs and outputs\n",
"\n", "\n",
"In the code cell below, replace datastorename with your default datastore name. Copy the file `testdata.txt` (located in the pipeline folder that this notebook is in) to the path on the datastore." "For purpose of this demo, we're going to execute a simple U-SQL script that reads a CSV file and writes portion of content to a new text file. First, let's create our sample input which contains 3 columns: employee Id, name and department Id."
] ]
}, },
{ {
@@ -132,26 +169,51 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"datastorename = \"MyAdlsDatastore\"\n", "# create a folder to store files for our job\n",
"sample_folder = \"adla_sample\"\n",
"\n", "\n",
"adls_datastore = Datastore(workspace=ws, name=datastorename)\n", "if not os.path.isdir(sample_folder):\n",
"script_input = DataReference(\n", " os.mkdir(sample_folder)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%writefile $sample_folder/sample_input.csv\n",
"1, Noah, 100\n",
"3, Liam, 100\n",
"4, Emma, 100\n",
"5, Jacob, 100\n",
"7, Jennie, 100"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Upload this file to Data Lake Storage at location `adla_sample/sample_input.csv` and create a DataReference to refer to this file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sample_input = DataReference(\n",
" datastore=adls_datastore,\n", " datastore=adls_datastore,\n",
" data_reference_name=\"script_input\",\n", " data_reference_name=\"employee_data\",\n",
" path_on_datastore=\"testdata/testdata.txt\")\n", " path_on_datastore=\"adla_sample/sample_input.csv\")"
"\n",
"script_output = PipelineData(\"script_output\", datastore=adls_datastore)\n",
"\n",
"print(\"Created Pipeline Data\")"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Setup Data Lake Account\n", "Create PipelineData object to store output produced by AdlaStep."
"\n",
"ADLA can only use data that is located in the default data store associated with that ADLA account. Through Azure portal, check the name of the default data store corresponding to the ADLA account you are using below. Replace the value associated with `adla_compute_name` in the code cell below accordingly."
] ]
}, },
{ {
@@ -160,35 +222,23 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"adla_compute_name = 'testadl' # Replace this with your default compute\n", "sample_output = PipelineData(\"sample_output\", datastore=adls_datastore)"
"\n",
"from azureml.core.compute import ComputeTarget, AdlaCompute\n",
"\n",
"def get_or_create_adla_compute(workspace, compute_name):\n",
" try:\n",
" return AdlaCompute(workspace, compute_name)\n",
" except ComputeTargetException as e:\n",
" if 'ComputeTargetNotFound' in e.message:\n",
" print('adla compute not found, creating...')\n",
" provisioning_config = AdlaCompute.provisioning_configuration()\n",
" adla_compute = ComputeTarget.create(workspace, compute_name, provisioning_config)\n",
" adla_compute.wait_for_completion()\n",
" return adla_compute\n",
" else:\n",
" raise e\n",
" \n",
"adla_compute = get_or_create_adla_compute(ws, adla_compute_name)\n",
"\n",
"# CLI:\n",
"# Create: az ml computetarget setup adla -n <name>\n",
"# BYOC: az ml computetarget attach adla -n <name> -i <resource-id>"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"Once the above code cell completes, run the below to check your ADLA compute status:" "## Write your U-SQL script\n",
"\n",
"Now let's write a U-Sql script that reads above CSV file and writes the name column to a new file.\n",
"\n",
"Instead of hard-coding paths in your script, you can use `@@name@@` syntax to refer to inputs, outputs, and parameters.\n",
"\n",
"- If `name` is the name of an input or output port binding, any occurrences of `@@name@@` in the script are replaced with actual data path of corresponding port binding.\n",
"- If `name` matches any key in the `params` dictionary, any occurrences of `@@name@@` will be replaced with corresponding value in the dictionary.\n",
"\n",
"Note the use of @@ syntax in the below script. Before submitting the job to Data Lake Analytics service, `@@emplyee_data@@` will be replaced with actual path of `sample_input.csv` in Data Lake Storage. Similarly, `@@sample_output@@` will be replaced with a path in Data Lake Storage which will be used to store intermediate output produced by the step."
] ]
}, },
{ {
@@ -197,58 +247,43 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"ADLA compute state:{}\".format(adla_compute.provisioning_state))\n", "%%writefile $sample_folder/sample_script.usql\n",
"print(\"ADLA compute state:{}\".format(adla_compute.provisioning_errors))\n", "\n",
"print(\"Using ADLA compute:{}\".format(adla_compute.cluster_resource_id))" "// Read employee information from csv file\n",
"@employees = \n",
" EXTRACT EmpId int, EmpName string, DeptId int\n",
" FROM \"@@employee_data@@\"\n",
" USING Extractors.Csv();\n",
"\n",
"// Export employee names to text file\n",
"OUTPUT\n",
"(\n",
" SELECT EmpName\n",
" FROM @employees\n",
")\n",
"TO \"@@sample_output@@\"\n",
"USING Outputters.Text();"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Create an AdlaStep" "## Create an AdlaStep\n",
] "\n",
}, "**[AdlaStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.adla_step.adlastep?view=azure-ml-py)** is used to run U-SQL script using Azure Data Lake Analytics.\n",
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**AdlaStep** is used to run U-SQL script using Azure Data Lake Analytics.\n",
"\n", "\n",
"- **name:** Name of module\n", "- **name:** Name of module\n",
"- **script_name:** name of U-SQL script\n", "- **script_name:** name of U-SQL script file\n",
"- **inputs:** List of input port bindings\n", "- **inputs:** List of input port bindings\n",
"- **outputs:** List of output port bindings\n", "- **outputs:** List of output port bindings\n",
"- **adla_compute:** the ADLA compute to use for this job\n", "- **compute_target:** the ADLA compute to use for this job\n",
"- **params:** Dictionary of name-value pairs to pass to U-SQL job *(optional)*\n", "- **params:** Dictionary of name-value pairs to pass to U-SQL job *(optional)*\n",
"- **degree_of_parallelism:** the degree of parallelism to use for this job *(optional)*\n", "- **degree_of_parallelism:** the degree of parallelism to use for this job *(optional)*\n",
"- **priority:** the priority value to use for the current job *(optional)*\n", "- **priority:** the priority value to use for the current job *(optional)*\n",
"- **runtime_version:** the runtime version of the Data Lake Analytics engine *(optional)*\n", "- **runtime_version:** the runtime version of the Data Lake Analytics engine *(optional)*\n",
"- **root_folder:** folder that contains the script, assemblies etc. *(optional)*\n", "- **source_directory:** folder that contains the script, assemblies etc. *(optional)*\n",
"- **hash_paths:** list of paths to hash to detect a change (script file is always hashed) *(optional)*\n", "- **hash_paths:** list of paths to hash to detect a change (script file is always hashed) *(optional)*"
"\n",
"### Remarks\n",
"\n",
"You can use `@@name@@` syntax in your script to refer to inputs, outputs, and params.\n",
"\n",
"* if `name` is the name of an input or output port binding, any occurences of `@@name@@` in the script\n",
"are replaced with actual data path of corresponding port binding.\n",
"* if `name` matches any key in `params` dict, any occurences of `@@name@@` will be replaced with\n",
"corresponding value in dict.\n",
"\n",
"#### Sample script\n",
"\n",
"```\n",
"@resourcereader =\n",
" EXTRACT query string\n",
" FROM \"@@script_input@@\"\n",
" USING Extractors.Csv();\n",
"\n",
"\n",
"OUTPUT @resourcereader\n",
"TO \"@@script_output@@\"\n",
"USING Outputters.Csv();\n",
"```"
] ]
}, },
{ {
@@ -258,10 +293,11 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"adla_step = AdlaStep(\n", "adla_step = AdlaStep(\n",
" name='adla_script_step',\n", " name='extract_employee_names',\n",
" script_name='test_adla_script.usql',\n", " script_name='sample_script.usql',\n",
" inputs=[script_input],\n", " source_directory=sample_folder,\n",
" outputs=[script_output],\n", " inputs=[sample_input],\n",
" outputs=[sample_output],\n",
" compute_target=adla_compute)" " compute_target=adla_compute)"
] ]
}, },
@@ -278,13 +314,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"pipeline = Pipeline(\n", "pipeline = Pipeline(workspace=ws, steps=[adla_step])\n",
" description=\"adla_102\",\n",
" workspace=ws, \n",
" steps=[adla_step],\n",
" default_source_directory=script_folder)\n",
"\n", "\n",
"pipeline_run = Experiment(workspace, experiment_name).submit(pipeline)\n", "pipeline_run = Experiment(ws, 'adla_sample').submit(pipeline)\n",
"pipeline_run.wait_for_completion()" "pipeline_run.wait_for_completion()"
] ]
}, },
@@ -304,39 +336,6 @@
"from azureml.widgets import RunDetails\n", "from azureml.widgets import RunDetails\n",
"RunDetails(pipeline_run).show()" "RunDetails(pipeline_run).show()"
] ]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Examine the run\n",
"You can cycle through the node_run objects and examine job logs, stdout, and stderr of each of the steps."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"step_runs = pipeline_run.get_children()\n",
"for step_run in step_runs:\n",
" status = step_run.get_status()\n",
" print('node', step_run.name, 'status:', status)\n",
" if status == \"Failed\":\n",
" joblog = step_run.get_job_log()\n",
" print('job log:', joblog)\n",
" stdout_log = step_run.get_stdout_log()\n",
" print('stdout log:', stdout_log)\n",
" stderr_log = step_run.get_stderr_log()\n",
" print('stderr log:', stderr_log)\n",
" with open(\"logs-\" + step_run.name + \".txt\", \"w\") as f:\n",
" f.write(joblog)\n",
" print(\"Job log written to logs-\"+ step_run.name + \".txt\")\n",
" if status == \"Finished\":\n",
" stdout_log = step_run.get_stdout_log()\n",
" print('stdout log:', stdout_log)"
]
} }
], ],
"metadata": { "metadata": {
@@ -346,9 +345,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python [default]", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@@ -89,7 +89,7 @@
"from azureml.core.runconfig import JarLibrary\n", "from azureml.core.runconfig import JarLibrary\n",
"from azureml.core.compute import ComputeTarget, DatabricksCompute\n", "from azureml.core.compute import ComputeTarget, DatabricksCompute\n",
"from azureml.exceptions import ComputeTargetException\n", "from azureml.exceptions import ComputeTargetException\n",
"from azureml.core import Workspace, Run, Experiment\n", "from azureml.core import Workspace, Experiment\n",
"from azureml.pipeline.core import Pipeline, PipelineData\n", "from azureml.pipeline.core import Pipeline, PipelineData\n",
"from azureml.pipeline.steps import DatabricksStep\n", "from azureml.pipeline.steps import DatabricksStep\n",
"from azureml.core.datastore import Datastore\n", "from azureml.core.datastore import Datastore\n",
@@ -146,7 +146,7 @@
"db_access_token=os.getenv(\"DATABRICKS_ACCESS_TOKEN\", \"<my-access-token>\") # Databricks access token\n", "db_access_token=os.getenv(\"DATABRICKS_ACCESS_TOKEN\", \"<my-access-token>\") # Databricks access token\n",
" \n", " \n",
"try:\n", "try:\n",
" databricks_compute = ComputeTarget(workspace=ws, name=db_compute_name)\n", " databricks_compute = DatabricksCompute(workspace=ws, name=db_compute_name)\n",
" print('Compute target {} already exists'.format(db_compute_name))\n", " print('Compute target {} already exists'.format(db_compute_name))\n",
"except ComputeTargetException:\n", "except ComputeTargetException:\n",
" print('Compute not found, will use below parameters to attach new one')\n", " print('Compute not found, will use below parameters to attach new one')\n",
@@ -176,7 +176,7 @@
"### Type of Data Access\n", "### Type of Data Access\n",
"Databricks allows to interact with Azure Blob and ADLS in two ways.\n", "Databricks allows to interact with Azure Blob and ADLS in two ways.\n",
"- **Direct Access**: Databricks allows you to interact with Azure Blob or ADLS URIs directly. The input or output URIs will be mapped to a Databricks widget param in the Databricks notebook.\n", "- **Direct Access**: Databricks allows you to interact with Azure Blob or ADLS URIs directly. The input or output URIs will be mapped to a Databricks widget param in the Databricks notebook.\n",
"- **Mouting**: You will be supplied with additional parameters and secrets that will enable you to mount your ADLS or Azure Blob input or output location in your Databricks notebook." "- **Mounting**: You will be supplied with additional parameters and secrets that will enable you to mount your ADLS or Azure Blob input or output location in your Databricks notebook."
] ]
}, },
{ {
@@ -297,7 +297,7 @@
"print('Datastore {} will be used'.format(def_blob_store.name))\n", "print('Datastore {} will be used'.format(def_blob_store.name))\n",
"\n", "\n",
"# We are uploading a sample file in the local directory to be used as a datasource\n", "# We are uploading a sample file in the local directory to be used as a datasource\n",
"def_blob_store.upload_files([\"./testdata.txt\"], target_path=\"dbtest\", overwrite=False)\n", "def_blob_store.upload_files(files=[\"./testdata.txt\"], target_path=\"dbtest\", overwrite=False)\n",
"\n", "\n",
"step_1_input = DataReference(datastore=def_blob_store, path_on_datastore=\"dbtest\",\n", "step_1_input = DataReference(datastore=def_blob_store, path_on_datastore=\"dbtest\",\n",
" data_reference_name=\"input\")\n", " data_reference_name=\"input\")\n",
@@ -317,8 +317,9 @@
"- **existing_cluster_id:** Cluster ID of an existing Interactive cluster on the Databricks workspace. If you are providing this, do not provide any of the parameters below that are used to create a new cluster such as spark_version, node_type, etc.\n", "- **existing_cluster_id:** Cluster ID of an existing Interactive cluster on the Databricks workspace. If you are providing this, do not provide any of the parameters below that are used to create a new cluster such as spark_version, node_type, etc.\n",
"- **spark_version:** Version of spark for the databricks run cluster. default value: 4.0.x-scala2.11\n", "- **spark_version:** Version of spark for the databricks run cluster. default value: 4.0.x-scala2.11\n",
"- **node_type:** Azure vm node types for the databricks run cluster. default value: Standard_D3_v2\n", "- **node_type:** Azure vm node types for the databricks run cluster. default value: Standard_D3_v2\n",
"- **num_workers:** Number of workers for the databricks run cluster\n", "- **num_workers:** Specifies a static number of workers for the databricks run cluster\n",
"- **autoscale:** The autoscale configuration for the databricks run cluster\n", "- **min_workers:** Specifies a min number of workers to use for auto-scaling the databricks run cluster\n",
"- **max_workers:** Specifies a max number of workers to use for auto-scaling the databricks run cluster\n",
"- **spark_env_variables:** Spark environment variables for the databricks run cluster (dictionary of {str:str}). default value: {'PYSPARK_PYTHON': '/databricks/python3/bin/python3'}\n", "- **spark_env_variables:** Spark environment variables for the databricks run cluster (dictionary of {str:str}). default value: {'PYSPARK_PYTHON': '/databricks/python3/bin/python3'}\n",
"- **notebook_path:** Path to the notebook in the databricks instance. If you are providing this, do not provide python script related paramaters or JAR related parameters.\n", "- **notebook_path:** Path to the notebook in the databricks instance. If you are providing this, do not provide python script related paramaters or JAR related parameters.\n",
"- **notebook_params:** Parameters for the databricks notebook (dictionary of {str:str}). Fetch this inside the notebook using dbutils.widgets.get(\"myparam\")\n", "- **notebook_params:** Parameters for the databricks notebook (dictionary of {str:str}). Fetch this inside the notebook using dbutils.widgets.get(\"myparam\")\n",
@@ -342,11 +343,12 @@
"- **version:** Optional version tag to denote a change in functionality for the step\n", "- **version:** Optional version tag to denote a change in functionality for the step\n",
"\n", "\n",
"\\* *denotes required fields* \n", "\\* *denotes required fields* \n",
"*You must provide exactly one of num_workers or autoscale paramaters* \n", "*You must provide exactly one of num_workers or min_workers and max_workers paramaters* \n",
"*You must provide exactly one of databricks_compute or databricks_compute_name parameters*\n", "*You must provide exactly one of databricks_compute or databricks_compute_name parameters*\n",
"\n", "\n",
"## Use runconfig to specify library dependencies\n", "## Use runconfig to specify library dependencies\n",
"You can use a runconfig to specify the library dependencies for your cluster in Databricks. The runconfig will contain a databricks section as follows:\n", "You can use a runconfig to specify the library dependencies for your cluster in Databricks. The runconfig will contain a databricks section as follows:\n",
"\n",
"```yaml\n", "```yaml\n",
"environment:\n", "environment:\n",
"# Databricks details\n", "# Databricks details\n",
@@ -364,14 +366,21 @@
" repo: ''\n", " repo: ''\n",
"# List of RCran libraries\n", "# List of RCran libraries\n",
" rcranLibraries:\n", " rcranLibraries:\n",
" - package: ada\n", " -\n",
"# Coordinates.\n",
" package: ada\n",
"# Repo\n",
" repo: http://cran.us.r-project.org\n", " repo: http://cran.us.r-project.org\n",
"# List of JAR libraries\n", "# List of JAR libraries\n",
" jarLibraries:\n", " jarLibraries:\n",
" - library: dbfs:/mnt/libraries/library.jar\n", " -\n",
"# Coordinates.\n",
" library: dbfs:/mnt/libraries/library.jar\n",
"# List of Egg libraries\n", "# List of Egg libraries\n",
" eggLibraries:\n", " eggLibraries:\n",
" - library: dbfs:/mnt/libraries/library.egg\n", " -\n",
"# Coordinates.\n",
" library: dbfs:/mnt/libraries/library.egg\n",
"```\n", "```\n",
"\n", "\n",
"You can then create a RunConfiguration object using this file and pass it as the runconfig parameter to DatabricksStep.\n", "You can then create a RunConfiguration object using this file and pass it as the runconfig parameter to DatabricksStep.\n",
@@ -388,7 +397,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"### 1. Running the demo notebook already added to the Databricks workspace\n", "### 1. Running the demo notebook already added to the Databricks workspace\n",
"Create a notebook in the Azure Databricks workspace, and provide the path to that notebook as the value associated with the environment variable \"DATABRICKS_NOTEBOOK_PATH\". This will then set the variable notebook_path when you run the code cell below:" "Create a notebook in the Azure Databricks workspace, and provide the path to that notebook as the value associated with the environment variable \"DATABRICKS_NOTEBOOK_PATH\". This will then set the variable\u00c2\u00a0notebook_path\u00c2\u00a0when you run the code cell below:"
] ]
}, },
{ {
@@ -408,7 +417,7 @@
" notebook_params={'myparam': 'testparam'},\n", " notebook_params={'myparam': 'testparam'},\n",
" run_name='DB_Notebook_demo',\n", " run_name='DB_Notebook_demo',\n",
" compute_target=databricks_compute,\n", " compute_target=databricks_compute,\n",
" allow_reuse=False\n", " allow_reuse=True\n",
")" ")"
] ]
}, },
@@ -425,11 +434,10 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"#PUBLISHONLY\n", "steps = [dbNbStep]\n",
"#steps = [dbNbStep]\n", "pipeline = Pipeline(workspace=ws, steps=steps)\n",
"#pipeline = Pipeline(workspace=ws, steps=steps)\n", "pipeline_run = Experiment(ws, 'DB_Notebook_demo').submit(pipeline)\n",
"#pipeline_run = Experiment(ws, 'DB_Notebook_demo').submit(pipeline)\n", "pipeline_run.wait_for_completion()"
"#pipeline_run.wait_for_completion()"
] ]
}, },
{ {
@@ -445,23 +453,24 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"#PUBLISHONLY\n", "from azureml.widgets import RunDetails\n",
"#from azureml.widgets import RunDetails\n", "RunDetails(pipeline_run).show()"
"#RunDetails(pipeline_run).show()"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### 2. Running a Python script that is already added in DBFS\n", "### 2. Running a Python script from DBFS\n",
"To run a Python script that is already uploaded to DBFS, follow the instructions below. You will first upload the Python script to DBFS using the [CLI](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html).\n", "This shows how to run a Python script in DBFS. \n",
"\n", "\n",
"The commented out code in the below cell assumes that you have uploaded `train-db-dbfs.py` to the root folder in DBFS. You can upload `train-db-dbfs.py` to the root folder in DBFS using this commandline so you can use `python_script_path = \"dbfs:/train-db-dbfs.py\"`:\n", "To complete this, you will need to first upload the Python script in your local machine to DBFS using the [CLI](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html). The CLI command is given below:\n",
"\n", "\n",
"```\n", "```\n",
"dbfs cp ./train-db-dbfs.py dbfs:/train-db-dbfs.py\n", "dbfs cp ./train-db-dbfs.py dbfs:/train-db-dbfs.py\n",
"```" "```\n",
"\n",
"The code in the below cell assumes that you have completed the previous step of uploading the script `train-db-dbfs.py` to the root folder in DBFS."
] ]
}, },
{ {
@@ -470,7 +479,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"python_script_path = \"dbfs:/train-db-dbfs.py\"\n", "python_script_path = os.getenv(\"DATABRICKS_PYTHON_SCRIPT_PATH\", \"<my-databricks-python-script-path>\") # Databricks python script path\n",
"\n", "\n",
"dbPythonInDbfsStep = DatabricksStep(\n", "dbPythonInDbfsStep = DatabricksStep(\n",
" name=\"DBPythonInDBFS\",\n", " name=\"DBPythonInDBFS\",\n",
@@ -480,7 +489,7 @@
" python_script_params={'--input_data'},\n", " python_script_params={'--input_data'},\n",
" run_name='DB_Python_demo',\n", " run_name='DB_Python_demo',\n",
" compute_target=databricks_compute,\n", " compute_target=databricks_compute,\n",
" allow_reuse=False\n", " allow_reuse=True\n",
")" ")"
] ]
}, },
@@ -497,11 +506,10 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"#PUBLISHONLY\n", "steps = [dbPythonInDbfsStep]\n",
"#steps = [dbPythonInDbfsStep]\n", "pipeline = Pipeline(workspace=ws, steps=steps)\n",
"#pipeline = Pipeline(workspace=ws, steps=steps)\n", "pipeline_run = Experiment(ws, 'DB_Python_demo').submit(pipeline)\n",
"#pipeline_run = Experiment(ws, 'DB_Python_demo').submit(pipeline)\n", "pipeline_run.wait_for_completion()"
"#pipeline_run.wait_for_completion()"
] ]
}, },
{ {
@@ -517,9 +525,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"#PUBLISHONLY\n", "from azureml.widgets import RunDetails\n",
"#from azureml.widgets import RunDetails\n", "RunDetails(pipeline_run).show()"
"#RunDetails(pipeline_run).show()"
] ]
}, },
{ {
@@ -551,7 +558,7 @@
" source_directory=source_directory,\n", " source_directory=source_directory,\n",
" run_name='DB_Python_Local_demo',\n", " run_name='DB_Python_Local_demo',\n",
" compute_target=databricks_compute,\n", " compute_target=databricks_compute,\n",
" allow_reuse=False\n", " allow_reuse=True\n",
")" ")"
] ]
}, },
@@ -612,7 +619,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"main_jar_class_name = \"com.microsoft.aeva.Main\"\n", "main_jar_class_name = \"com.microsoft.aeva.Main\"\n",
"jar_library_dbfs_path = \"dbfs:/train-db-dbfs.jar\"\n", "jar_library_dbfs_path = os.getenv(\"DATABRICKS_JAR_LIB_PATH\", \"<my-databricks-jar-lib-path>\") # Databricks jar library path\n",
"\n", "\n",
"dbJarInDbfsStep = DatabricksStep(\n", "dbJarInDbfsStep = DatabricksStep(\n",
" name=\"DBJarInDBFS\",\n", " name=\"DBJarInDBFS\",\n",
@@ -623,7 +630,7 @@
" run_name='DB_JAR_demo',\n", " run_name='DB_JAR_demo',\n",
" jar_libraries=[JarLibrary(jar_library_dbfs_path)],\n", " jar_libraries=[JarLibrary(jar_library_dbfs_path)],\n",
" compute_target=databricks_compute,\n", " compute_target=databricks_compute,\n",
" allow_reuse=False\n", " allow_reuse=True\n",
")" ")"
] ]
}, },
@@ -640,11 +647,10 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"#PUBLISHONLY\n", "steps = [dbJarInDbfsStep]\n",
"#steps = [dbJarInDbfsStep]\n", "pipeline = Pipeline(workspace=ws, steps=steps)\n",
"#pipeline = Pipeline(workspace=ws, steps=steps)\n", "pipeline_run = Experiment(ws, 'DB_JAR_demo').submit(pipeline)\n",
"#pipeline_run = Experiment(ws, 'DB_JAR_demo').submit(pipeline)\n", "pipeline_run.wait_for_completion()"
"#pipeline_run.wait_for_completion()"
] ]
}, },
{ {
@@ -660,9 +666,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"#PUBLISHONLY\n", "from azureml.widgets import RunDetails\n",
"#from azureml.widgets import RunDetails\n", "RunDetails(pipeline_run).show()"
"#RunDetails(pipeline_run).show()"
] ]
}, },
{ {
@@ -681,9 +686,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@@ -33,22 +33,17 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import azureml.core\n", "import azureml.core\n",
"from azureml.core import Workspace, Run, Experiment, Datastore\n", "from azureml.core import Workspace, Experiment, Datastore\n",
"from azureml.core.compute import AmlCompute\n", "from azureml.core.compute import AmlCompute\n",
"from azureml.core.compute import ComputeTarget\n", "from azureml.core.compute import ComputeTarget\n",
"from azureml.core.compute import DataFactoryCompute\n",
"from azureml.widgets import RunDetails\n", "from azureml.widgets import RunDetails\n",
"\n", "\n",
"# Check core SDK version number\n", "# Check core SDK version number\n",
"print(\"SDK version:\", azureml.core.VERSION)\n", "print(\"SDK version:\", azureml.core.VERSION)\n",
"\n", "\n",
"from azureml.data.data_reference import DataReference\n", "from azureml.data.data_reference import DataReference\n",
"from azureml.pipeline.core import Pipeline, PipelineData, StepSequence\n", "from azureml.pipeline.core import Pipeline, PipelineData\n",
"from azureml.pipeline.steps import PythonScriptStep\n", "from azureml.pipeline.steps import PythonScriptStep\n",
"from azureml.pipeline.steps import DataTransferStep\n",
"from azureml.pipeline.core import PublishedPipeline\n",
"from azureml.pipeline.core.graph import PipelineParameter\n",
"\n",
"print(\"Pipeline SDK-specific imports completed\")" "print(\"Pipeline SDK-specific imports completed\")"
] ]
}, },
@@ -135,12 +130,13 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core.compute_target import ComputeTargetException\n",
"\n", "\n",
"aml_compute_target = \"aml-compute\"\n", "aml_compute_target = \"aml-compute\"\n",
"try:\n", "try:\n",
" aml_compute = AmlCompute(ws, aml_compute_target)\n", " aml_compute = AmlCompute(ws, aml_compute_target)\n",
" print(\"found existing compute target.\")\n", " print(\"found existing compute target.\")\n",
"except:\n", "except ComputeTargetException:\n",
" print(\"creating new compute target\")\n", " print(\"creating new compute target\")\n",
" \n", " \n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\",\n", " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\",\n",
@@ -158,9 +154,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"# For a more detailed view of current Azure Machine Learning Compute status, use the 'status' property\n", "# For a more detailed view of current Azure Machine Learning Compute status, use get_status()\n",
"# example: un-comment the following line.\n", "# example: un-comment the following line.\n",
"# print(aml_compute.status.serialize())" "# print(aml_compute.get_status().serialize())"
] ]
}, },
{ {
@@ -396,9 +392,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@@ -0,0 +1,106 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import argparse
import os
import tensorflow as tf
from azureml.core import Run
from utils import load_data
print("TensorFlow version:", tf.VERSION)
parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', type=str, dest='data_folder', help='data folder mounting point')
parser.add_argument('--batch-size', type=int, dest='batch_size', default=50, help='mini batch size for training')
parser.add_argument('--first-layer-neurons', type=int, dest='n_hidden_1', default=100,
help='# of neurons in the first layer')
parser.add_argument('--second-layer-neurons', type=int, dest='n_hidden_2', default=100,
help='# of neurons in the second layer')
parser.add_argument('--learning-rate', type=float, dest='learning_rate', default=0.01, help='learning rate')
args = parser.parse_args()
data_folder = os.path.join(args.data_folder, 'mnist')
print('training dataset is stored here:', data_folder)
X_train = load_data(os.path.join(data_folder, 'train-images.gz'), False) / 255.0
X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0
y_train = load_data(os.path.join(data_folder, 'train-labels.gz'), True).reshape(-1)
y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape, sep='\n')
training_set_size = X_train.shape[0]
n_inputs = 28 * 28
n_h1 = args.n_hidden_1
n_h2 = args.n_hidden_2
n_outputs = 10
learning_rate = args.learning_rate
n_epochs = 50
batch_size = args.batch_size
with tf.name_scope('network'):
    # construct the DNN
    X = tf.placeholder(tf.float32, shape=(None, n_inputs), name='X')
    y = tf.placeholder(tf.int64, shape=(None), name='y')
    h1 = tf.layers.dense(X, n_h1, activation=tf.nn.relu, name='h1')
    h2 = tf.layers.dense(h1, n_h2, activation=tf.nn.relu, name='h2')
    output = tf.layers.dense(h2, n_outputs, name='output')

with tf.name_scope('train'):
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=output)
    loss = tf.reduce_mean(cross_entropy, name='loss')
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.minimize(loss)

with tf.name_scope('eval'):
    correct = tf.nn.in_top_k(output, y, 1)
    acc_op = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()
saver = tf.train.Saver()

# start an Azure ML run
run = Run.get_context()

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        # randomly shuffle training set
        indices = np.random.permutation(training_set_size)
        X_train = X_train[indices]
        y_train = y_train[indices]

        # batch index
        b_start = 0
        b_end = b_start + batch_size
        for _ in range(training_set_size // batch_size):
            # get a batch
            X_batch, y_batch = X_train[b_start: b_end], y_train[b_start: b_end]

            # update batch index for the next batch
            b_start = b_start + batch_size
            b_end = min(b_start + batch_size, training_set_size)

            # train
            sess.run(train_op, feed_dict={X: X_batch, y: y_batch})

        # evaluate training accuracy on the last mini-batch of the epoch
        acc_train = acc_op.eval(feed_dict={X: X_batch, y: y_batch})
        # evaluate validation accuracy on the held-out test set
        acc_val = acc_op.eval(feed_dict={X: X_test, y: y_test})
        # log accuracies; 'validation_acc' is the primary metric HyperDrive optimizes
        run.log('training_acc', np.float(acc_train))
        run.log('validation_acc', np.float(acc_val))
        print(epoch, '-- Training accuracy:', acc_train, '\b Validation accuracy:', acc_val)

    y_hat = np.argmax(output.eval(feed_dict={X: X_test}), axis=1)
    run.log('final_acc', np.float(acc_val))

    os.makedirs('./outputs/model', exist_ok=True)
    # files saved in the "./outputs" folder are automatically uploaded into run history
    saver.save(sess, './outputs/model/mnist-tf.model')

View File

@@ -0,0 +1,27 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import gzip
import numpy as np
import struct
# load compressed MNIST gz files and return numpy arrays
def load_data(filename, label=False):
    with gzip.open(filename) as gz:
        struct.unpack('I', gz.read(4))  # skip the magic number in the file header
        n_items = struct.unpack('>I', gz.read(4))
        if not label:
            n_rows = struct.unpack('>I', gz.read(4))[0]
            n_cols = struct.unpack('>I', gz.read(4))[0]
            res = np.frombuffer(gz.read(n_items[0] * n_rows * n_cols), dtype=np.uint8)
            res = res.reshape(n_items[0], n_rows * n_cols)
        else:
            res = np.frombuffer(gz.read(n_items[0]), dtype=np.uint8)
            res = res.reshape(n_items[0], 1)
    return res


# one-hot encode a 1-D array
def one_hot_encode(array, num_of_classes):
    return np.eye(num_of_classes)[array.reshape(-1)]

View File

@@ -37,10 +37,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core import Datastore\n",
"from azureml.core import Experiment\n", "from azureml.core import Experiment\n",
"from azureml.core.compute import AmlCompute, ComputeTarget\n", "from azureml.core.compute import AmlCompute, ComputeTarget\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"from azureml.core.datastore import Datastore\n", "from azureml.core.datastore import Datastore\n",
"from azureml.core.runconfig import CondaDependencies, RunConfiguration\n", "from azureml.core.runconfig import CondaDependencies, RunConfiguration\n",
"from azureml.data.data_reference import DataReference\n", "from azureml.data.data_reference import DataReference\n",
@@ -55,7 +53,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import os\n", "import os\n",
"from azureml.core import Workspace, Run, Experiment\n", "from azureml.core import Workspace\n",
"\n", "\n",
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n", "print('Workspace name: ' + ws.name, \n",
@@ -76,7 +74,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Set up datastores\n", "### Set up datastores\n",
"First, lets access the datastore that has the model, labels, and images. \n", "First, let\u00e2\u20ac\u2122s access the datastore that has the model, labels, and images. \n",
"\n", "\n",
"### Create a datastore that points to a blob container containing sample images\n", "### Create a datastore that points to a blob container containing sample images\n",
"\n", "\n",
@@ -106,7 +104,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"Next, lets specify the default datastore for the outputs." "Next, let\u00e2\u20ac\u2122s specify the default datastore for the outputs."
] ]
}, },
{ {
@@ -166,8 +164,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import os\n",
"\n",
"# choose a name for your cluster\n", "# choose a name for your cluster\n",
"aml_compute_name = os.environ.get(\"AML_COMPUTE_NAME\", \"gpu-cluster\")\n", "aml_compute_name = os.environ.get(\"AML_COMPUTE_NAME\", \"gpu-cluster\")\n",
"cluster_min_nodes = os.environ.get(\"AML_COMPUTE_MIN_NODES\", 0)\n", "cluster_min_nodes = os.environ.get(\"AML_COMPUTE_MIN_NODES\", 0)\n",
@@ -193,8 +189,8 @@
" # if no min node count is provided it will use the scale settings for the cluster\n", " # if no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n", " compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
" \n", " \n",
" # For a more detailed view of current Azure Machine Learning Compute status, use the 'status' property \n", " # For a more detailed view of current Azure Machine Learning Compute status, use get_status()\n",
" print(compute_target.status.serialize())" " print(compute_target.get_status().serialize())"
] ]
}, },
{ {
@@ -295,7 +291,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Build and run the batch scoring pipeline\n", "## Build and run the batch scoring pipeline\n",
"You have everything you need to build the pipeline. Lets put all these together." "You have everything you need to build the pipeline. Let\u00e2\u20ac\u2122s put all these together."
] ]
}, },
{ {
@@ -493,11 +489,11 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core.authentication import AzureCliAuthentication\n", "from azureml.core.authentication import InteractiveLoginAuthentication\n",
"import requests\n", "import requests\n",
"\n", "\n",
"cli_auth = AzureCliAuthentication()\n", "auth = InteractiveLoginAuthentication()\n",
"aad_token = cli_auth.get_authentication_header()" "aad_token = auth.get_authentication_header()\n"
] ]
}, },
{ {
@@ -513,8 +509,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.pipeline.core import PublishedPipeline\n",
"\n",
"rest_endpoint = published_pipeline.endpoint\n", "rest_endpoint = published_pipeline.endpoint\n",
"# specify batch size when running the pipeline\n", "# specify batch size when running the pipeline\n",
"response = requests.post(rest_endpoint, \n", "response = requests.post(rest_endpoint, \n",
@@ -551,9 +545,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@@ -44,7 +44,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import os\n", "import os\n",
"from azureml.core import Workspace, Run, Experiment\n", "from azureml.core import Workspace, Experiment\n",
"\n", "\n",
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n", "print('Workspace name: ' + ws.name, \n",
@@ -69,7 +69,8 @@
"from azureml.data.data_reference import DataReference\n", "from azureml.data.data_reference import DataReference\n",
"from azureml.pipeline.core import Pipeline, PipelineData\n", "from azureml.pipeline.core import Pipeline, PipelineData\n",
"from azureml.pipeline.steps import PythonScriptStep, MpiStep\n", "from azureml.pipeline.steps import PythonScriptStep, MpiStep\n",
"from azureml.core.runconfig import CondaDependencies, RunConfiguration" "from azureml.core.runconfig import CondaDependencies, RunConfiguration\n",
"from azureml.core.compute_target import ComputeTargetException"
] ]
}, },
{ {
@@ -90,7 +91,7 @@
"try:\n", "try:\n",
" cpu_cluster = AmlCompute(ws, cpu_cluster_name)\n", " cpu_cluster = AmlCompute(ws, cpu_cluster_name)\n",
" print(\"found existing cluster.\")\n", " print(\"found existing cluster.\")\n",
"except:\n", "except ComputeTargetException:\n",
" print(\"creating new cluster\")\n", " print(\"creating new cluster\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_v2\",\n", " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_v2\",\n",
" max_nodes = 1)\n", " max_nodes = 1)\n",
@@ -104,7 +105,7 @@
"try:\n", "try:\n",
" gpu_cluster = AmlCompute(ws, gpu_cluster_name)\n", " gpu_cluster = AmlCompute(ws, gpu_cluster_name)\n",
" print(\"found existing cluster.\")\n", " print(\"found existing cluster.\")\n",
"except:\n", "except ComputeTargetException:\n",
" print(\"creating new cluster\")\n", " print(\"creating new cluster\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_NC6\",\n", " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_NC6\",\n",
" max_nodes = 3)\n", " max_nodes = 3)\n",
@@ -464,11 +465,11 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core.authentication import AzureCliAuthentication\n", "from azureml.core.authentication import InteractiveLoginAuthentication\n",
"import requests\n", "import requests\n",
"\n", "\n",
"cli_auth = AzureCliAuthentication()\n", "auth = InteractiveLoginAuthentication()\n",
"aad_token = cli_auth.get_authentication_header()" "aad_token = auth.get_authentication_header()\n"
] ]
}, },
{ {
@@ -526,7 +527,6 @@
" \"ParameterAssignments\": {\"style\": \"rain_princess\", \"nodecount\": 3}}) \n", " \"ParameterAssignments\": {\"style\": \"rain_princess\", \"nodecount\": 3}}) \n",
"run_id = response.json()[\"Id\"]\n", "run_id = response.json()[\"Id\"]\n",
"\n", "\n",
"from azureml.pipeline.core.run import PipelineRun\n",
"published_pipeline_run_rain = PipelineRun(ws.experiments[\"style_transfer\"], run_id)\n", "published_pipeline_run_rain = PipelineRun(ws.experiments[\"style_transfer\"], run_id)\n",
"\n", "\n",
"RunDetails(published_pipeline_run_rain).show()" "RunDetails(published_pipeline_run_rain).show()"
@@ -545,7 +545,6 @@
" \"ParameterAssignments\": {\"style\": \"udnie\", \"nodecount\": 4}}) \n", " \"ParameterAssignments\": {\"style\": \"udnie\", \"nodecount\": 4}}) \n",
"run_id = response.json()[\"Id\"]\n", "run_id = response.json()[\"Id\"]\n",
"\n", "\n",
"from azureml.pipeline.core.run import PipelineRun\n",
"published_pipeline_run_udnie = PipelineRun(ws.experiments[\"style_transfer\"], run_id)\n", "published_pipeline_run_udnie = PipelineRun(ws.experiments[\"style_transfer\"], run_id)\n",
"\n", "\n",
"RunDetails(published_pipeline_run_udnie).show()" "RunDetails(published_pipeline_run_udnie).show()"
@@ -588,9 +587,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python3" "name": "python36"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@@ -0,0 +1,253 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License.\n",
"\n",
"## Authentication in Azure Machine Learning\n",
"\n",
"This notebook shows you how to authenticate to your Azure ML Workspace using\n",
"\n",
" 1. Interactive Login Authentication\n",
" 2. Azure CLI Authentication\n",
" 3. Service Principal Authentication\n",
" \n",
"The interactive authentication is suitable for local experimentation on your own computer. Azure CLI authentication is suitable if you are already using Azure CLI for managing Azure resources, and want to sign in only once. The Service Principal authentication is suitable for automated workflows, for example as part of Azure Devops build."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Interactive Authentication\n",
"\n",
"Interactive authentication is the default mode when using Azure ML SDK.\n",
"\n",
"When you connect to your workspace using workspace.from_config, you will get an interactive login dialog."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Also, if you explicitly specify the subscription ID, resource group and resource group, you will get the dialog."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace(subscription_id=\"my-subscription-id\",\n",
" resource_group=\"my-ml-rg\",\n",
" workspace_name=\"my-ml-workspace\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note the user you're authenticated as must have access to the subscription and resource group. If you receive an error\n",
"\n",
"```\n",
"AuthenticationException: You don't have access to xxxxxx-xxxx-xxx-xxx-xxxxxxxxxx subscription. All the subscriptions that you have access to = ...\n",
"```\n",
"\n",
"check that the you used correct login and entered the correct subscription ID."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In some cases, you may see a version of the error message containing text: ```All the subscriptions that you have access to = []```\n",
"\n",
"In such a case, you may have to specify the tenant ID of the Azure Active Directory you're using. An example would be accessing a subscription as a guest to a tenant that is not your default. You specify the tenant by explicitly instantiating _InteractiveLoginAuthentication_ with tenant ID as argument ([see instructions how to obtain tenant Id](#get-tenant-id))."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.authentication import InteractiveLoginAuthentication\n",
"\n",
"interactive_auth = InteractiveLoginAuthentication(tenant_id=\"my-tenant-id\")\n",
"\n",
"ws = Workspace(subscription_id=\"my-subscription-id\",\n",
" resource_group=\"my-ml-rg\",\n",
" workspace_name=\"my-ml-workspace\",\n",
" auth=interactive_auth)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Azure CLI Authentication\n",
"\n",
"If you have installed azure-cli package, and used ```az login``` command to log in to your Azure Subscription, you can use _AzureCliAuthentication_ class.\n",
"\n",
"Note that interactive authentication described above won't use existing Azure CLI auth tokens. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.authentication import AzureCliAuthentication\n",
"\n",
"cli_auth = AzureCliAuthentication()\n",
"\n",
"ws = Workspace(subscription_id=\"my-subscription-id\",\n",
" resource_group=\"my-ml-rg\",\n",
" workspace_name=\"my-ml-workspace\",\n",
" auth=cli_auth)\n",
"\n",
"print(\"Found workspace {} at location {}\".format(ws.name, ws.location))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Service Principal Authentication\n",
"\n",
"When setting up a machine learning workflow as an automated process, we recommend using Service Principal Authentication. This approach decouples the authentication from any specific user login, and allows managed access control.\n",
"\n",
"Note that you must have administrator privileges over the Azure subscription to complete these steps.\n",
"\n",
"The first step is to create a service principal. First, go to [Azure Portal](https://portal.azure.com), select **Azure Active Directory** and **App Registrations**. Then select **+New application registration**, give your service principal a name, for example _my-svc-principal_. You can leave application type as is, and specify a dummy value for Sign-on URL, such as _https://invalid_.\n",
"\n",
"Then click **Create**.\n",
"\n",
"![service principal creation]<img src=\"images/svc-pr-1.PNG\">"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next step is to obtain the _Application ID_ (also called username) and create _password_ for the service principal.\n",
"\n",
"From the page for your newly created service principal, copy the _Application ID_. Then select **Settings** and **Keys**, write a description for your key, and select duration. Then click **Save**, and copy the _password_ to a secure location.\n",
"\n",
"![application id and password](images/svc-pr-2.PNG)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id =\"get-tenant-id\"></a>\n",
"\n",
"Also, you need to obtain the tenant ID of your Azure subscription. Go back to **Azure Active Directory**, select **Properties** and copy _Directory ID_.\n",
"\n",
"![tenant id](images/svc-pr-3.PNG)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, you need to give the service principal permissions to access your workspace. Navigate to **Resource Groups**, to the resource group for your Machine Learning Workspace. \n",
"\n",
"Then select **Access Control (IAM)** and **Add a role assignment**. For _Role_, specify which level of access you need to grant, for example _Contributor_. Start entering your service principal name and once it is found, select it, and click **Save**.\n",
"\n",
"![add role](images/svc-pr-4.PNG)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now you are ready to use the service principal authentication. For example, to connect to your Workspace, see code below and enter your own values for tenant ID, application ID, subscription ID, resource group and workspace.\n",
"\n",
"**We strongly recommended that you do not insert the secret password to code**. Instead, you can use environment variables to pass it to your code, for example through Azure Key Vault, or through secret build variables in Azure DevOps. For local testing, you can for example use following PowerShell command to set the environment variable.\n",
"\n",
"```\n",
"$env:AZUREML_PASSWORD = \"my-password\"\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from azureml.core.authentication import ServicePrincipalAuthentication\n",
"\n",
"svc_pr_password = os.environ.get(\"AZUREML_PASSWORD\")\n",
"\n",
"svc_pr = ServicePrincipalAuthentication(\n",
" tenant_id=\"my-tenant-id\",\n",
" username=\"my-application-id\",\n",
" password=svc_pr_password)\n",
"\n",
"\n",
"ws = Workspace(\n",
" subscription_id=\"my-subscription-id\",\n",
" resource_group=\"my-ml-rg\",\n",
" workspace_name=\"my-ml-workspace\",\n",
" auth=svc_pr\n",
" )\n",
"\n",
"print(\"Found workspace {} at location {}\".format(ws.name, ws.location))"
]
}
],
"metadata": {
"authors": [
{
"name": "roastala"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
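
As a follow-on to the notebook above: the service principal credentials can also be combined with a config.json so that subscription and workspace details stay out of automation scripts. A minimal sketch, assuming Workspace.from_config accepts an auth argument (it does in current azureml-core) and that the environment variable names other than AZUREML_PASSWORD are your own choice:

import os
from azureml.core import Workspace
from azureml.core.authentication import ServicePrincipalAuthentication

# secrets come from the environment, never from source control
# (on bash, for example: export AZUREML_PASSWORD="my-password")
svc_pr = ServicePrincipalAuthentication(
    tenant_id=os.environ["AZUREML_TENANT_ID"],    # illustrative variable name
    username=os.environ["AZUREML_APP_ID"],        # illustrative variable name
    password=os.environ["AZUREML_PASSWORD"])

# reuse the workspace details stored in config.json instead of hard-coding them
ws = Workspace.from_config(auth=svc_pr)
print("Found workspace {} at location {}".format(ws.name, ws.location))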

Binary file not shown.

After

Width:  |  Height:  |  Size: 37 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 68 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 67 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 121 KiB

View File

@@ -1,8 +1,15 @@
## Azure Machine Learning service training examples ## Azure Machine Learning service training examples
These examples show you: These examples show you:
* Distributed training of models on Machine Learning Compute cluster
* Hyperparameter tuning at scale 1. [How to use the Estimator pattern in Azure ML](how-to-use-estimator)
* Using Tensorboard with Azure ML Python SDK. 2. [Train using TensorFlow Estimator and tune hyperparameters using Hyperdrive](train-hyperparameter-tune-deploy-with-tensorflow)
3. [Train using Pytorch Estimator and tune hyperparameters using Hyperdrive](train-hyperparameter-tune-deploy-with-pytorch)
4. [Distributed training using TensorFlow and Parameter Server](distributed-tensorflow-with-parameter-server)
5. [Distributed training using TensorFlow and Horovod](distributed-tensorflow-with-horovod)
6. [Distributed training using Pytorch and Horovod](distributed-pytorch-with-horovod)
7. [Distributed training using CNTK and custom Docker image](distributed-cntk-with-custom-docker)
8. [Export run history records to Tensorboard](export-run-history-to-tensorboard)
9. [Use TensorBoard to monitor training execution](tensorboard)
Learn more about how to use `Estimator` class to [train deep neural networks with Azure Machine Learning](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-ml-models). Learn more about how to use `Estimator` class to [train deep neural networks with Azure Machine Learning](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-ml-models).

View File

@@ -23,7 +23,7 @@
"source": [ "source": [
"## Prerequisites\n", "## Prerequisites\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n", "* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
"* Go through the [00.configuration.ipynb]() notebook to:\n", "* Go through the [configuration notebook](../../../configuration.ipynb) to:\n",
" * install the AML SDK\n", " * install the AML SDK\n",
" * create a workspace and its configuration file (`config.json`)" " * create a workspace and its configuration file (`config.json`)"
] ]
@@ -69,7 +69,7 @@
"source": [ "source": [
"## Initialize workspace\n", "## Initialize workspace\n",
"\n", "\n",
"Initialize a [Workspace](https://review.docs.microsoft.com/en-us/azure/machine-learning/service/concept-azure-machine-learning-architecture?branch=release-ignite-aml#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`." "Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`."
] ]
}, },
{ {
@@ -81,10 +81,10 @@
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"\n", "\n",
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n", "print('Workspace name: ' + ws.name,\n",
" 'Azure region: ' + ws.location, \n", " 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n", " 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')" " 'Resource group: ' + ws.resource_group, sep='\\n')"
] ]
}, },
{ {
@@ -124,8 +124,8 @@
"\n", "\n",
" compute_target.wait_for_completion(show_output=True)\n", " compute_target.wait_for_completion(show_output=True)\n",
"\n", "\n",
"# Use the 'status' property to get a detailed status for the current AmlCompute. \n", "# use get_status() to get a detailed status for the current AmlCompute\n",
"print(compute_target.status.serialize())" "print(compute_target.get_status().serialize())"
] ]
}, },
{ {
@@ -282,7 +282,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.train.estimator import *\n", "from azureml.train.estimator import Estimator\n",
"\n", "\n",
"script_params = {\n", "script_params = {\n",
" '--num_epochs': 20,\n", " '--num_epochs': 20,\n",
@@ -296,9 +296,9 @@
" script_params=script_params,\n", " script_params=script_params,\n",
" node_count=2,\n", " node_count=2,\n",
" process_count_per_node=1,\n", " process_count_per_node=1,\n",
" distributed_backend='mpi', \n", " distributed_backend='mpi',\n",
" pip_packages=['cntk-gpu==2.6'],\n", " pip_packages=['cntk-gpu==2.6'],\n",
" custom_docker_base_image='microsoft/mmlspark:gpu-0.12',\n", " custom_docker_image='microsoft/mmlspark:gpu-0.12',\n",
" use_gpu=True)" " use_gpu=True)"
] ]
}, },
@@ -306,7 +306,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"We would like to train our model using a [pre-built Docker container](https://hub.docker.com/r/microsoft/mmlspark/). To do so, specify the name of the docker image to the argument `custom_docker_base_image`. You can only provide images available in public docker repositories such as Docker Hub using this argument. To use an image from a private docker repository, use the constructor's `environment_definition` parameter instead. Finally, we provide the `cntk` package to `pip_packages` to install CNTK 2.6 on our custom image.\n", "We would like to train our model using a [pre-built Docker container](https://hub.docker.com/r/microsoft/mmlspark/). To do so, specify the name of the docker image to the argument `custom_docker_image`. Finally, we provide the `cntk` package to `pip_packages` to install CNTK 2.6 on our custom image.\n",
"\n", "\n",
"The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to run distributed CNTK, which uses MPI, you must provide the argument `distributed_backend='mpi'`." "The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to run distributed CNTK, which uses MPI, you must provide the argument `distributed_backend='mpi'`."
] ]

View File

@@ -22,8 +22,8 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Prerequisites\n", "## Prerequisites\n",
"* Go through the [Configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`\n", "* Go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`\n",
"* Review the [tutorial](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) on single-node PyTorch training using Azure Machine Learning" "* Review the [tutorial](../train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) on single-node PyTorch training using Azure Machine Learning"
] ]
}, },
{ {
@@ -82,7 +82,7 @@
"print('Workspace name: ' + ws.name, \n", "print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n", " 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n", " 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')" " 'Resource group: ' + ws.resource_group, sep='\\n')"
] ]
}, },
{ {
@@ -122,8 +122,8 @@
"\n", "\n",
" compute_target.wait_for_completion(show_output=True)\n", " compute_target.wait_for_completion(show_output=True)\n",
"\n", "\n",
"# Use the 'status' property to get a detailed status for the current AmlCompute. \n", "# use get_status() to get a detailed status for the current AmlCompute. \n",
"print(compute_target.status.serialize())" "print(compute_target.get_status().serialize())"
] ]
}, },
{ {

View File

@@ -50,7 +50,7 @@ if args.cuda:
torch.cuda.manual_seed(args.seed) torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {} kwargs = {}
train_dataset = \ train_dataset = \
datasets.MNIST('data-%d' % hvd.rank(), train=True, download=True, datasets.MNIST('data-%d' % hvd.rank(), train=True, download=True,
transform=transforms.Compose([ transform=transforms.Compose([

View File

@@ -23,10 +23,10 @@
"source": [ "source": [
"## Prerequisites\n", "## Prerequisites\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning (AML)\n", "* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning (AML)\n",
"* Go through the [00.configuration.ipynb](https://github.com/Azure/MachineLearningNotebooks/blob/master/00.configuration.ipynb) notebook to:\n", "* Go through the [configuration notebook](../../../configuration.ipynb) to:\n",
" * install the AML SDK\n", " * install the AML SDK\n",
" * create a workspace and its configuration file (`config.json`)\n", " * create a workspace and its configuration file (`config.json`)\n",
"* Review the [tutorial](https://aka.ms/aml-notebook-hyperdrive) on single-node TensorFlow training using the SDK" "* Review the [tutorial](../train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) on single-node TensorFlow training using the SDK"
] ]
}, },
{ {
@@ -84,7 +84,7 @@
"print('Workspace name: ' + ws.name, \n", "print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n", " 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n", " 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')" " 'Resource group: ' + ws.resource_group, sep='\\n')"
] ]
}, },
{ {
@@ -96,7 +96,7 @@
"\n", "\n",
"**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n", "**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
"\n", "\n",
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
] ]
}, },
{ {
@@ -124,8 +124,8 @@
"\n", "\n",
" compute_target.wait_for_completion(show_output=True)\n", " compute_target.wait_for_completion(show_output=True)\n",
"\n", "\n",
"# Use the 'status' property to get a detailed status for the current cluster. \n", "# use get_status() to get a detailed status for the current cluster. \n",
"print(compute_target.status.serialize())" "print(compute_target.get_status().serialize())"
] ]
}, },
{ {
@@ -238,8 +238,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import os\n",
"\n",
"project_folder = './tf-distr-hvd'\n", "project_folder = './tf-distr-hvd'\n",
"os.makedirs(project_folder, exist_ok=True)" "os.makedirs(project_folder, exist_ok=True)"
] ]

View File

@@ -23,7 +23,7 @@
"source": [ "source": [
"## Prerequisites\n", "## Prerequisites\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning (AML)\n", "* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning (AML)\n",
"* Go through the [00.configuration.ipynb](https://github.com/Azure/MachineLearningNotebooks/blob/master/00.configuration.ipynb) notebook to:\n", "* Go through the [configuration notebook](../../../configuration.ipynb) to:\n",
" * install the AML SDK\n", " * install the AML SDK\n",
" * create a workspace and its configuration file (`config.json`)\n", " * create a workspace and its configuration file (`config.json`)\n",
"* Review the [tutorial](https://aka.ms/aml-notebook-hyperdrive) on single-node TensorFlow training using the SDK" "* Review the [tutorial](https://aka.ms/aml-notebook-hyperdrive) on single-node TensorFlow training using the SDK"
@@ -124,8 +124,8 @@
"\n", "\n",
" compute_target.wait_for_completion(show_output=True)\n", " compute_target.wait_for_completion(show_output=True)\n",
"\n", "\n",
"# Use the 'status' property to get a detailed status for the current cluster. \n", "# use get_status() to get a detailed status for the current cluster. \n",
"print(compute_target.status.serialize())" "print(compute_target.get_status().serialize())"
] ]
}, },
{ {

View File

@@ -26,7 +26,7 @@
"source": [ "source": [
"## Prerequisites\n", "## Prerequisites\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n", "* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
"* Go through the [00.configuration.ipynb](https://github.com/Azure/MachineLearningNotebooks/blob/master/00.configuration.ipynb) notebook to:\n", "* Go through the [configuration notebook](../../../configuration.ipynb) notebook to:\n",
" * install the AML SDK\n", " * install the AML SDK\n",
" * create a workspace and its configuration file (`config.json`)" " * create a workspace and its configuration file (`config.json`)"
] ]
@@ -74,14 +74,13 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core import Workspace, Run, Experiment\n", "from azureml.core import Workspace, Experiment\n",
"\n",
"\n", "\n",
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n", "print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n", " 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n", " 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')" " 'Resource group: ' + ws.resource_group, sep='\\n')"
] ]
}, },
{ {
@@ -143,7 +142,7 @@
" # More data science stuff\n", " # More data science stuff\n",
" reg = Ridge(alpha=alpha)\n", " reg = Ridge(alpha=alpha)\n",
" reg.fit(data[\"train\"][\"x\"], data[\"train\"][\"y\"])\n", " reg.fit(data[\"train\"][\"x\"], data[\"train\"][\"y\"])\n",
" # TODO save model\n", " \n",
" preds = reg.predict(data[\"test\"][\"x\"])\n", " preds = reg.predict(data[\"test\"][\"x\"])\n",
" mse = mean_squared_error(preds, data[\"test\"][\"y\"])\n", " mse = mean_squared_error(preds, data[\"test\"][\"y\"])\n",
" # End train and eval\n", " # End train and eval\n",
@@ -169,7 +168,6 @@
"# Export Run History to Tensorboard logs\n", "# Export Run History to Tensorboard logs\n",
"from azureml.contrib.tensorboard.export import export_to_tensorboard\n", "from azureml.contrib.tensorboard.export import export_to_tensorboard\n",
"import os\n", "import os\n",
"import tensorflow as tf\n",
"\n", "\n",
"logdir = 'exportedTBlogs'\n", "logdir = 'exportedTBlogs'\n",
"log_path = os.path.join(os.getcwd(), logdir)\n", "log_path = os.path.join(os.getcwd(), logdir)\n",

View File

@@ -0,0 +1,16 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
print("*********************************************************")
print("Hello Azure ML!")
try:
from azureml.core import Run
run = Run.get_context()
print("Log Fibonacci numbers.")
run.log_list('Fibonacci numbers', [0, 1, 1, 2, 3, 5, 8, 13, 21, 34])
run.complete()
except:
print("Warning: you need to install Azure ML SDK in order to log metrics.")
print("*********************************************************")

View File

@@ -0,0 +1,363 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {
"nbpresent": {
"id": "bf74d2e9-2708-49b1-934b-e0ede342f475"
}
},
"source": [
"# How to use Estimator in Azure ML\n",
"\n",
"## Introduction\n",
"This tutorial shows how to use the Estimator pattern in Azure Machine Learning SDK. Estimator is a convenient object in Azure Machine Learning that wraps run configuration information to help simplify the tasks of specifying how a script is executed.\n",
"\n",
"\n",
"## Prerequisite:\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
"* Go through the [configuration notebook](../../../configuration.ipynb) to:\n",
" * install the AML SDK\n",
" * create a workspace and its configuration file (`config.json`)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's get started. First let's import some Python libraries."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"nbpresent": {
"id": "edaa7f2f-2439-4148-b57a-8c794c0945ec"
}
},
"outputs": [],
"source": [
"import azureml.core\n",
"from azureml.core import Workspace\n",
"\n",
"# check core SDK version number\n",
"print(\"Azure ML SDK Version: \", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize workspace\n",
"Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {
"nbpresent": {
"id": "59f52294-4a25-4c92-bab8-3b07f0f44d15"
}
},
"source": [
"## Create an Azure ML experiment\n",
"Let's create an experiment named \"estimator-test\". The script runs will be recorded under this experiment in Azure."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"nbpresent": {
"id": "bc70f780-c240-4779-96f3-bc5ef9a37d59"
}
},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"exp = Experiment(workspace=ws, name='estimator-test')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create or Attach existing AmlCompute\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `STANDARD_NC6` GPU VMs. This process is broken down into 3 steps:\n",
"1. create the configuration (this step is local and only takes a second)\n",
"2. create the cluster (this step will take about **20 seconds**)\n",
"3. provision the VMs to bring the cluster to the initial size (of 1 in this case). This step will take about **3-5 minutes** and is providing only sparse output in the process. Please make sure to wait until the call returns before moving to the next cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# choose a name for your cluster\n",
"cluster_name = \"cpucluster\"\n",
"\n",
"try:\n",
" cpu_cluster = ComputeTarget(workspace=ws, name=cluster_name)\n",
" print('Found existing compute target')\n",
"except ComputeTargetException:\n",
" print('Creating a new compute target...')\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', max_nodes=4)\n",
"\n",
" # create the cluster\n",
" cpu_cluster = ComputeTarget.create(ws, cluster_name, compute_config)\n",
"\n",
" # can poll for a minimum number of nodes and for a specific timeout. \n",
" # if no min node count is provided it uses the scale settings for the cluster\n",
" cpu_cluster.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
"\n",
"# use get_status() to get a detailed status for the current cluster. \n",
"print(cpu_cluster.get_status().serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now that you have created the compute target, let's see what the workspace's `compute_targets` property returns. You should now see one entry named 'cpucluster' of type `AmlCompute`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"compute_targets = ws.compute_targets\n",
"for name, ct in compute_targets.items():\n",
" print(name, ct.type, ct.provisioning_state)"
]
},
{
"cell_type": "markdown",
"metadata": {
"nbpresent": {
"id": "2039d2d5-aca6-4f25-a12f-df9ae6529cae"
}
},
"source": [
"## Use a simple script\n",
"We have already created a simple \"hello world\" script. This is the script that we will submit through the estimator pattern. It prints a hello-world message, and if Azure ML SDK is installed, it will also logs an array of values ([Fibonacci numbers](https://en.wikipedia.org/wiki/Fibonacci_number))."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"with open('./dummy_train.py', 'r') as f:\n",
" print(f.read())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create A Generic Estimator"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"First we import the Estimator class and also a widget to visualize a run."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.estimator import Estimator\n",
"from azureml.widgets import RunDetails"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The simplest estimator is to submit the current folder to the local computer. Estimator by default will attempt to use Docker-based execution. Let's turn that off for now. It then builds a conda environment locally, installs Azure ML SDK in it, and runs your script."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# use a conda environment, don't use Docker, on local computer\n",
"est = Estimator(source_directory='.', compute_target='local', entry_script='dummy_train.py', use_docker=False)\n",
"run = exp.submit(est)\n",
"RunDetails(run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can also enable Docker and let estimator pick the default CPU image supplied by Azure ML for execution. You can target an AmlCompute cluster (or any other supported compute target types)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# use a conda environment on default Docker image in an AmlCompute cluster\n",
"est = Estimator(source_directory='.', compute_target=cpu_cluster, entry_script='dummy_train.py', use_docker=True)\n",
"run = exp.submit(est)\n",
"RunDetails(run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can customize the conda environment by adding conda and/or pip packages."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# add a conda package\n",
"est = Estimator(source_directory='.', \n",
" compute_target='local', \n",
" entry_script='dummy_train.py', \n",
" use_docker=False, \n",
" conda_packages=['scikit-learn'])\n",
"run = exp.submit(est)\n",
"RunDetails(run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can also specify a custom Docker image for exeution. In this case, you probably want to tell the system not to build a new conda environment for you. Instead, you can specify the path to an existing Python environment in the custom Docker image.\n",
"\n",
"**Note**: since the below example points to the preinstalled Python environment in the miniconda3 image maintained by continuum.io on Docker Hub where Azure ML SDK is not present, the logging metric code is not triggered. But a run history record is still recorded. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# use a custom Docker image\n",
"from azureml.core.runconfig import ContainerRegistry\n",
"\n",
"# this is an image available in Docker Hub\n",
"image_name = 'continuumio/miniconda3'\n",
"\n",
"# you can also point to an image in a private ACR\n",
"image_registry_details = ContainerRegistry()\n",
"image_registry_details.address = \"myregistry.azurecr.io\"\n",
"image_registry_details.username = \"username\"\n",
"image_registry_details.password = \"password\"\n",
"\n",
"# don't let the system build a new conda environment\n",
"user_managed_dependencies = True\n",
"\n",
"# submit to a local Docker container. if you don't have Docker engine running locally, you can set compute_target to cpu_cluster.\n",
"est = Estimator(source_directory='.', compute_target='local', \n",
" entry_script='dummy_train.py',\n",
" custom_docker_image=image_name,\n",
" image_registry_details=image_registry_details,\n",
" user_managed=user_managed_dependencies\n",
" )\n",
"\n",
"run = exp.submit(est)\n",
"RunDetails(run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next Steps\n",
"Now you can proceed to explore the other types of estimators, such as TensorFlow estimator, PyTorch estimator, etc. in the sample folder."
]
}
],
"metadata": {
"authors": [
{
"name": "minxia"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
},
"msauthor": "haining"
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -27,7 +27,7 @@
"source": [ "source": [
"## Prerequisites\n", "## Prerequisites\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n", "* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
"* Go through the [00.configuration.ipynb](https://github.com/Azure/MachineLearningNotebooks/blob/master/00.configuration.ipynb) notebook to:\n", "* Go through the [configuration notebook](../../../configuration.ipynb) notebook to:\n",
" * install the AML SDK\n", " * install the AML SDK\n",
" * create a workspace and its configuration file (`config.json`)" " * create a workspace and its configuration file (`config.json`)"
] ]
@@ -104,7 +104,7 @@
"print('Workspace name: ' + ws.name, \n", "print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n", " 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n", " 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')" " 'Resource group: ' + ws.resource_group, sep='\\n')"
] ]
}, },
{ {
@@ -153,7 +153,7 @@
"source": [ "source": [
"import requests\n", "import requests\n",
"import os\n", "import os\n",
"import tempfile\n", "\n",
"tf_code = requests.get(\"https://raw.githubusercontent.com/tensorflow/tensorflow/r1.8/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py\")\n", "tf_code = requests.get(\"https://raw.githubusercontent.com/tensorflow/tensorflow/r1.8/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py\")\n",
"with open(os.path.join(exp_dir, \"mnist_with_summaries.py\"), \"w\") as file:\n", "with open(os.path.join(exp_dir, \"mnist_with_summaries.py\"), \"w\") as file:\n",
" file.write(tf_code.text)" " file.write(tf_code.text)"
@@ -192,9 +192,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core import Experiment, Run\n", "from azureml.core import Experiment\n",
"from azureml.core.script_run_config import ScriptRunConfig\n", "from azureml.core.script_run_config import ScriptRunConfig\n",
"import tensorflow as tf\n",
"\n", "\n",
"logs_dir = os.path.join(os.curdir, \"logs\")\n", "logs_dir = os.path.join(os.curdir, \"logs\")\n",
"data_dir = os.path.abspath(os.path.join(os.curdir, \"mnist_data\"))\n", "data_dir = os.path.abspath(os.path.join(os.curdir, \"mnist_data\"))\n",
@@ -276,7 +275,7 @@
"Tensorboard uploading works with all compute targets. Here we demonstrate it from a DSVM.\n", "Tensorboard uploading works with all compute targets. Here we demonstrate it from a DSVM.\n",
"Note that the Tensorboard instance itself will be run by the notebook kernel. Again, this means this notebook's kernel must have access to the Tensorboard module.\n", "Note that the Tensorboard instance itself will be run by the notebook kernel. Again, this means this notebook's kernel must have access to the Tensorboard module.\n",
"\n", "\n",
"If you are unfamiliar with DSVM configuration, check [04. Train in a remote VM](04.train-on-remote-vm.ipynb) for a more detailed breakdown.\n", "If you are unfamiliar with DSVM configuration, check [Train in a remote VM](../../training/train-on-remote-vm/train-on-remote-vm.ipynb) for a more detailed breakdown.\n",
"\n", "\n",
"**Note**: To streamline the compute that Azure Machine Learning creates, we are making updates to support creating only single to multi-node `AmlCompute`. The `DSVMCompute` class will be deprecated in a later release, but the DSVM can be created using the below single line command and then attached(like any VM) using the sample code below. Also note, that we only support Linux VMs for remote execution from AML and the commands below will spin a Linux VM only.\n", "**Note**: To streamline the compute that Azure Machine Learning creates, we are making updates to support creating only single to multi-node `AmlCompute`. The `DSVMCompute` class will be deprecated in a later release, but the DSVM can be created using the below single line command and then attached(like any VM) using the sample code below. Also note, that we only support Linux VMs for remote execution from AML and the commands below will spin a Linux VM only.\n",
"\n", "\n",
@@ -296,7 +295,6 @@
"source": [ "source": [
"from azureml.core.compute import RemoteCompute\n", "from azureml.core.compute import RemoteCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n", "from azureml.core.compute_target import ComputeTargetException\n",
"import os\n",
"\n", "\n",
"username = os.getenv('AZUREML_DSVM_USERNAME', default='<my_username>')\n", "username = os.getenv('AZUREML_DSVM_USERNAME', default='<my_username>')\n",
"address = os.getenv('AZUREML_DSVM_ADDRESS', default='<ip_address_or_fqdn>')\n", "address = os.getenv('AZUREML_DSVM_ADDRESS', default='<ip_address_or_fqdn>')\n",
@@ -405,7 +403,6 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n", "from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n", "\n",
"# choose a name for your cluster\n", "# choose a name for your cluster\n",
"cluster_name = \"cpucluster\"\n", "cluster_name = \"cpucluster\"\n",
@@ -423,8 +420,8 @@
"\n", "\n",
"compute_target.wait_for_completion(show_output=True, min_node_count=1, timeout_in_minutes=20)\n", "compute_target.wait_for_completion(show_output=True, min_node_count=1, timeout_in_minutes=20)\n",
"\n", "\n",
"# Use the 'status' property to get a detailed status for the current cluster. \n", "# use get_status() to get a detailed status for the current cluster. \n",
"print(compute_target.status.serialize())" "print(compute_target.get_status().serialize())"
] ]
}, },
{ {

View File

@@ -25,7 +25,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Prerequisites\n", "## Prerequisites\n",
"* Go through the [Configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`" "* Go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`"
] ]
}, },
{ {
@@ -83,7 +83,7 @@
"print('Workspace name: ' + ws.name, \n", "print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n", " 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n", " 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')" " 'Resource group: ' + ws.resource_group, sep='\\n')"
] ]
}, },
{ {
@@ -123,8 +123,8 @@
"\n", "\n",
" compute_target.wait_for_completion(show_output=True)\n", " compute_target.wait_for_completion(show_output=True)\n",
"\n", "\n",
"# Use the 'status' property to get a detailed status for the current cluster. \n", "# use get_status() to get a detailed status for the current cluster. \n",
"print(compute_target.status.serialize())" "print(compute_target.get_status().serialize())"
] ]
}, },
{ {
@@ -359,7 +359,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.train.hyperdrive import *\n", "from azureml.train.hyperdrive import RandomParameterSampling, BanditPolicy, HyperDriveRunConfig, uniform, PrimaryMetricGoal\n",
"\n", "\n",
"param_sampling = RandomParameterSampling( {\n", "param_sampling = RandomParameterSampling( {\n",
" 'learning_rate': uniform(0.0005, 0.005),\n", " 'learning_rate': uniform(0.0005, 0.005),\n",
@@ -409,8 +409,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.widgets import RunDetails\n",
"\n",
"RunDetails(hyperdrive_run).show()" "RunDetails(hyperdrive_run).show()"
] ]
}, },
@@ -649,7 +647,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import os, json\n", "import json\n",
"from PIL import Image\n", "from PIL import Image\n",
"import matplotlib.pyplot as plt\n", "import matplotlib.pyplot as plt\n",
"\n", "\n",

View File

@@ -26,7 +26,7 @@
"\n", "\n",
"## Prerequisite:\n", "## Prerequisite:\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n", "* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
"* Go through the [00.configuration.ipynb](https://github.com/Azure/MachineLearningNotebooks/blob/master/00.configuration.ipynb) notebook to:\n", "* Go through the [configuration notebook](../../../configuration.ipynb) to:\n",
" * install the AML SDK\n", " * install the AML SDK\n",
" * create a workspace and its configuration file (`config.json`)" " * create a workspace and its configuration file (`config.json`)"
] ]
@@ -51,7 +51,6 @@
"%matplotlib inline\n", "%matplotlib inline\n",
"import numpy as np\n", "import numpy as np\n",
"import os\n", "import os\n",
"import matplotlib\n",
"import matplotlib.pyplot as plt" "import matplotlib.pyplot as plt"
] ]
}, },
@@ -66,7 +65,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import azureml\n", "import azureml\n",
"from azureml.core import Workspace, Run\n", "from azureml.core import Workspace\n",
"\n", "\n",
"# check core SDK version number\n", "# check core SDK version number\n",
"print(\"Azure ML SDK Version: \", azureml.core.VERSION)" "print(\"Azure ML SDK Version: \", azureml.core.VERSION)"
@@ -109,8 +108,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core.workspace import Workspace\n",
"\n",
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n", "print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n", " 'Azure region: ' + ws.location, \n",
@@ -166,7 +163,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import os\n",
"import urllib\n", "import urllib\n",
"\n", "\n",
"os.makedirs('./data/mnist', exist_ok=True)\n", "os.makedirs('./data/mnist', exist_ok=True)\n",
@@ -299,8 +295,8 @@
" # if no min node count is provided it uses the scale settings for the cluster\n", " # if no min node count is provided it uses the scale settings for the cluster\n",
" compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n", " compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
"\n", "\n",
"# Use the 'status' property to get a detailed status for the current cluster. \n", "# use get_status() to get a detailed status for the current cluster. \n",
"print(compute_target.status.serialize())" "print(compute_target.get_status().serialize())"
] ]
}, },
{ {
@@ -431,7 +427,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Submit job to run\n", "## Submit job to run\n",
"Calling the `fit` function on the estimator submits the job to Azure ML for execution. Submitting the job should only take a few seconds." "Submit the estimator to an Azure ML experiment to kick off the execution."
] ]
}, },
{ {
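
A hedged sketch of the submit-and-wait pattern this cell refers to, assuming est and exp are the estimator and experiment created earlier in that notebook:

# sketch: submit the estimator and stream logs until the run finishes
run = exp.submit(est)                       # 'exp' and 'est' assumed from earlier cells
run.wait_for_completion(show_output=True)   # blocks and prints driver output
print(run.get_metrics())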
@@ -552,7 +548,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import os\n",
"\n", "\n",
"os.makedirs('./imgs', exist_ok=True)\n", "os.makedirs('./imgs', exist_ok=True)\n",
"metrics = run.get_metrics()\n", "metrics = run.get_metrics()\n",
@@ -685,7 +680,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.train.hyperdrive import *\n", "from azureml.train.hyperdrive import RandomParameterSampling, BanditPolicy, HyperDriveRunConfig, PrimaryMetricGoal\n",
"from azureml.train.hyperdrive import choice, loguniform\n",
"\n", "\n",
"ps = RandomParameterSampling(\n", "ps = RandomParameterSampling(\n",
" {\n", " {\n",
@@ -1079,7 +1075,6 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import requests\n", "import requests\n",
"import json\n",
"\n", "\n",
"# send a random row from the test set to score\n", "# send a random row from the test set to score\n",
"random_index = np.random.randint(0, len(X_test)-1)\n", "random_index = np.random.randint(0, len(X_test)-1)\n",
@@ -1163,7 +1158,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.6.6" "version": "3.6.8"
}, },
"msauthor": "minxia" "msauthor": "minxia"
}, },
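The hunk above swaps the wildcard `from azureml.train.hyperdrive import *` for explicit imports. A minimal sketch of how those names typically fit together, where the `est` estimator, the script argument names, and the metric name are assumptions rather than text taken from this diff:

```python
from azureml.train.hyperdrive import (RandomParameterSampling, BanditPolicy,
                                      HyperDriveRunConfig, PrimaryMetricGoal,
                                      choice, loguniform)

# hypothetical search space over two script arguments
ps = RandomParameterSampling({
    '--batch-size': choice(25, 50, 100),
    '--learning-rate': loguniform(-6, -1)
})

# stop poorly performing runs early
policy = BanditPolicy(evaluation_interval=2, slack_factor=0.1)

# est is assumed to be the estimator object created earlier in the notebook
hd_config = HyperDriveRunConfig(estimator=est,
                                hyperparameter_sampling=ps,
                                policy=policy,
                                primary_metric_name='validation_acc',
                                primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
                                max_total_runs=20,
                                max_concurrent_runs=4)
```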

View File

@@ -6,4 +6,5 @@ Follow these sample notebooks to learn:
2. [Train on local](train-on-local): train a model using local computer as compute target. 2. [Train on local](train-on-local): train a model using local computer as compute target.
3. [Train on remote VM](train-on-remote-vm): train a model using a remote Azure VM as compute target. 3. [Train on remote VM](train-on-remote-vm): train a model using a remote Azure VM as compute target.
4. [Train on AmlCompute](train-on-amlcompute): train a model using an AmlCompute cluster as compute target. 4. [Train on AmlCompute](train-on-amlcompute): train a model using an AmlCompute cluster as compute target.
5. [Logging API](logging-api): experiment with various logging functions to create runs and automatically generate graphs. 5. [Train in an HDI Spark cluster](train-in-spark): train a Spark ML model using an HDInsight Spark cluster as compute target.
6. [Logging API](logging-api): experiment with various logging functions to create runs and automatically generate graphs.

View File

@@ -22,7 +22,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Prerequisites\n", "## Prerequisites\n",
"Make sure you go through the [00. Installation and Configuration](../../00.configuration.ipynb) Notebook first if you haven't. Also make sure you have tqdm and matplotlib installed in the current kernel.\n", "Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't. Also make sure you have tqdm and matplotlib installed in the current kernel.\n",
"\n", "\n",
"```\n", "```\n",
"(myenv) $ conda install -y tqdm matplotlib\n", "(myenv) $ conda install -y tqdm matplotlib\n",
@@ -46,7 +46,7 @@
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core import Experiment, Run, Workspace\n", "from azureml.core import Experiment, Workspace\n",
"import azureml.core\n", "import azureml.core\n",
"import numpy as np\n", "import numpy as np\n",
"\n", "\n",

View File

@@ -0,0 +1,150 @@
5.1,3.5,1.4,0.2,Iris-setosa
4.9,3.0,1.4,0.2,Iris-setosa
4.7,3.2,1.3,0.2,Iris-setosa
4.6,3.1,1.5,0.2,Iris-setosa
5.0,3.6,1.4,0.2,Iris-setosa
5.4,3.9,1.7,0.4,Iris-setosa
4.6,3.4,1.4,0.3,Iris-setosa
5.0,3.4,1.5,0.2,Iris-setosa
4.4,2.9,1.4,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
5.4,3.7,1.5,0.2,Iris-setosa
4.8,3.4,1.6,0.2,Iris-setosa
4.8,3.0,1.4,0.1,Iris-setosa
4.3,3.0,1.1,0.1,Iris-setosa
5.8,4.0,1.2,0.2,Iris-setosa
5.7,4.4,1.5,0.4,Iris-setosa
5.4,3.9,1.3,0.4,Iris-setosa
5.1,3.5,1.4,0.3,Iris-setosa
5.7,3.8,1.7,0.3,Iris-setosa
5.1,3.8,1.5,0.3,Iris-setosa
5.4,3.4,1.7,0.2,Iris-setosa
5.1,3.7,1.5,0.4,Iris-setosa
4.6,3.6,1.0,0.2,Iris-setosa
5.1,3.3,1.7,0.5,Iris-setosa
4.8,3.4,1.9,0.2,Iris-setosa
5.0,3.0,1.6,0.2,Iris-setosa
5.0,3.4,1.6,0.4,Iris-setosa
5.2,3.5,1.5,0.2,Iris-setosa
5.2,3.4,1.4,0.2,Iris-setosa
4.7,3.2,1.6,0.2,Iris-setosa
4.8,3.1,1.6,0.2,Iris-setosa
5.4,3.4,1.5,0.4,Iris-setosa
5.2,4.1,1.5,0.1,Iris-setosa
5.5,4.2,1.4,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
5.0,3.2,1.2,0.2,Iris-setosa
5.5,3.5,1.3,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
4.4,3.0,1.3,0.2,Iris-setosa
5.1,3.4,1.5,0.2,Iris-setosa
5.0,3.5,1.3,0.3,Iris-setosa
4.5,2.3,1.3,0.3,Iris-setosa
4.4,3.2,1.3,0.2,Iris-setosa
5.0,3.5,1.6,0.6,Iris-setosa
5.1,3.8,1.9,0.4,Iris-setosa
4.8,3.0,1.4,0.3,Iris-setosa
5.1,3.8,1.6,0.2,Iris-setosa
4.6,3.2,1.4,0.2,Iris-setosa
5.3,3.7,1.5,0.2,Iris-setosa
5.0,3.3,1.4,0.2,Iris-setosa
7.0,3.2,4.7,1.4,Iris-versicolor
6.4,3.2,4.5,1.5,Iris-versicolor
6.9,3.1,4.9,1.5,Iris-versicolor
5.5,2.3,4.0,1.3,Iris-versicolor
6.5,2.8,4.6,1.5,Iris-versicolor
5.7,2.8,4.5,1.3,Iris-versicolor
6.3,3.3,4.7,1.6,Iris-versicolor
4.9,2.4,3.3,1.0,Iris-versicolor
6.6,2.9,4.6,1.3,Iris-versicolor
5.2,2.7,3.9,1.4,Iris-versicolor
5.0,2.0,3.5,1.0,Iris-versicolor
5.9,3.0,4.2,1.5,Iris-versicolor
6.0,2.2,4.0,1.0,Iris-versicolor
6.1,2.9,4.7,1.4,Iris-versicolor
5.6,2.9,3.6,1.3,Iris-versicolor
6.7,3.1,4.4,1.4,Iris-versicolor
5.6,3.0,4.5,1.5,Iris-versicolor
5.8,2.7,4.1,1.0,Iris-versicolor
6.2,2.2,4.5,1.5,Iris-versicolor
5.6,2.5,3.9,1.1,Iris-versicolor
5.9,3.2,4.8,1.8,Iris-versicolor
6.1,2.8,4.0,1.3,Iris-versicolor
6.3,2.5,4.9,1.5,Iris-versicolor
6.1,2.8,4.7,1.2,Iris-versicolor
6.4,2.9,4.3,1.3,Iris-versicolor
6.6,3.0,4.4,1.4,Iris-versicolor
6.8,2.8,4.8,1.4,Iris-versicolor
6.7,3.0,5.0,1.7,Iris-versicolor
6.0,2.9,4.5,1.5,Iris-versicolor
5.7,2.6,3.5,1.0,Iris-versicolor
5.5,2.4,3.8,1.1,Iris-versicolor
5.5,2.4,3.7,1.0,Iris-versicolor
5.8,2.7,3.9,1.2,Iris-versicolor
6.0,2.7,5.1,1.6,Iris-versicolor
5.4,3.0,4.5,1.5,Iris-versicolor
6.0,3.4,4.5,1.6,Iris-versicolor
6.7,3.1,4.7,1.5,Iris-versicolor
6.3,2.3,4.4,1.3,Iris-versicolor
5.6,3.0,4.1,1.3,Iris-versicolor
5.5,2.5,4.0,1.3,Iris-versicolor
5.5,2.6,4.4,1.2,Iris-versicolor
6.1,3.0,4.6,1.4,Iris-versicolor
5.8,2.6,4.0,1.2,Iris-versicolor
5.0,2.3,3.3,1.0,Iris-versicolor
5.6,2.7,4.2,1.3,Iris-versicolor
5.7,3.0,4.2,1.2,Iris-versicolor
5.7,2.9,4.2,1.3,Iris-versicolor
6.2,2.9,4.3,1.3,Iris-versicolor
5.1,2.5,3.0,1.1,Iris-versicolor
5.7,2.8,4.1,1.3,Iris-versicolor
6.3,3.3,6.0,2.5,Iris-virginica
5.8,2.7,5.1,1.9,Iris-virginica
7.1,3.0,5.9,2.1,Iris-virginica
6.3,2.9,5.6,1.8,Iris-virginica
6.5,3.0,5.8,2.2,Iris-virginica
7.6,3.0,6.6,2.1,Iris-virginica
4.9,2.5,4.5,1.7,Iris-virginica
7.3,2.9,6.3,1.8,Iris-virginica
6.7,2.5,5.8,1.8,Iris-virginica
7.2,3.6,6.1,2.5,Iris-virginica
6.5,3.2,5.1,2.0,Iris-virginica
6.4,2.7,5.3,1.9,Iris-virginica
6.8,3.0,5.5,2.1,Iris-virginica
5.7,2.5,5.0,2.0,Iris-virginica
5.8,2.8,5.1,2.4,Iris-virginica
6.4,3.2,5.3,2.3,Iris-virginica
6.5,3.0,5.5,1.8,Iris-virginica
7.7,3.8,6.7,2.2,Iris-virginica
7.7,2.6,6.9,2.3,Iris-virginica
6.0,2.2,5.0,1.5,Iris-virginica
6.9,3.2,5.7,2.3,Iris-virginica
5.6,2.8,4.9,2.0,Iris-virginica
7.7,2.8,6.7,2.0,Iris-virginica
6.3,2.7,4.9,1.8,Iris-virginica
6.7,3.3,5.7,2.1,Iris-virginica
7.2,3.2,6.0,1.8,Iris-virginica
6.2,2.8,4.8,1.8,Iris-virginica
6.1,3.0,4.9,1.8,Iris-virginica
6.4,2.8,5.6,2.1,Iris-virginica
7.2,3.0,5.8,1.6,Iris-virginica
7.4,2.8,6.1,1.9,Iris-virginica
7.9,3.8,6.4,2.0,Iris-virginica
6.4,2.8,5.6,2.2,Iris-virginica
6.3,2.8,5.1,1.5,Iris-virginica
6.1,2.6,5.6,1.4,Iris-virginica
7.7,3.0,6.1,2.3,Iris-virginica
6.3,3.4,5.6,2.4,Iris-virginica
6.4,3.1,5.5,1.8,Iris-virginica
6.0,3.0,4.8,1.8,Iris-virginica
6.9,3.1,5.4,2.1,Iris-virginica
6.7,3.1,5.6,2.4,Iris-virginica
6.9,3.1,5.1,2.3,Iris-virginica
5.8,2.7,5.1,1.9,Iris-virginica
6.8,3.2,5.9,2.3,Iris-virginica
6.7,3.3,5.7,2.5,Iris-virginica
6.7,3.0,5.2,2.3,Iris-virginica
6.3,2.5,5.0,1.9,Iris-virginica
6.5,3.0,5.2,2.0,Iris-virginica
6.2,3.4,5.4,2.3,Iris-virginica
5.9,3.0,5.1,1.8,Iris-virginica

View File

@@ -0,0 +1,278 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 05. Train in Spark\n",
"* Create Workspace\n",
"* Create Experiment\n",
"* Copy relevant files to the script folder\n",
"* Configure and Run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prerequisites\n",
"Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check core SDK version number\n",
"import azureml.core\n",
"\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize Workspace\n",
"\n",
"Initialize a workspace object from persisted configuration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create Experiment\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"experiment_name = 'train-on-spark'\n",
"\n",
"from azureml.core import Experiment\n",
"exp = Experiment(workspace=ws, name=experiment_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View `train-spark.py`\n",
"\n",
"For convenience, we created a training script for you. It is printed below as a text, but you can also run `%pfile ./train-spark.py` in a cell to show the file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"with open('train-spark.py', 'r') as training_script:\n",
" print(training_script.read())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Configure & Run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Note** You can use Docker-based execution to run the Spark job in local computer or a remote VM. Please see the `train-in-remote-vm` notebook for example on how to configure and run in Docker mode in a VM. Make sure you choose a Docker image that has Spark installed, such as `azureml.core.runconfig.DEFAULT_MMLSPARK_CPU_IMAGE`."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Attach an HDI cluster\n",
"Here we will use a actual Spark cluster, HDInsight for Spark, to run this job. To use HDI commpute target:\n",
" 1. Create a Spark for HDI cluster in Azure. Here are some [quick instructions](https://docs.microsoft.com/en-us/azure/hdinsight/spark/apache-spark-jupyter-spark-sql). Make sure you use the Ubuntu flavor, NOT CentOS.\n",
" 2. Enter the IP address, username and password below"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, HDInsightCompute\n",
"from azureml.exceptions import ComputeTargetException\n",
"import os\n",
"\n",
"try:\n",
" # if you want to connect using SSH key instead of username/password you can provide parameters private_key_file and private_key_passphrase\n",
" attach_config = HDInsightCompute.attach_configuration(address=os.environ.get('hdiservername', '<my_hdi_cluster_name>-ssh.azurehdinsight.net'), \n",
" ssh_port=22, \n",
" username=os.environ.get('hdiusername', '<ssh_username>'), \n",
" password=os.environ.get('hdipassword', '<my_password>'))\n",
" hdi_compute = ComputeTarget.attach(workspace=ws, \n",
" name='myhdi', \n",
" attach_configuration=attach_config)\n",
"\n",
"except ComputeTargetException as e:\n",
" print(\"Caught = {}\".format(e.message))\n",
" \n",
" \n",
"hdi_compute.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Configure HDI run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Configure an execution using the HDInsight cluster with a conda environment that has `numpy`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.runconfig import RunConfiguration\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"# use pyspark framework\n",
"hdi_run_config = RunConfiguration(framework=\"pyspark\")\n",
"\n",
"# Set compute target to the HDI cluster\n",
"hdi_run_config.target = hdi_compute.name\n",
"\n",
"# specify CondaDependencies object to ask system installing numpy\n",
"cd = CondaDependencies()\n",
"cd.add_conda_package('numpy')\n",
"hdi_run_config.environment.python.conda_dependencies = cd"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit the script to HDI"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"\n",
"script_run_config = ScriptRunConfig(source_directory = '.',\n",
" script= 'train-spark.py',\n",
" run_config = hdi_run_config)\n",
"run = exp.submit(config=script_run_config)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Monitor the run using a Juypter widget"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"After the run is succesfully finished, you can check the metrics logged."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# get all metris logged in the run\n",
"metrics = run.get_metrics()\n",
"print(metrics)"
]
}
],
"metadata": {
"authors": [
{
"name": "aashishb"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
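The note in this new notebook mentions that the same Spark job can also run in a local or VM-hosted Docker container that has Spark preinstalled. A minimal sketch of that alternative, assuming the `exp` experiment object defined above and the `DEFAULT_MMLSPARK_CPU_IMAGE` constant the note refers to:

```python
from azureml.core import ScriptRunConfig
from azureml.core.runconfig import RunConfiguration, DEFAULT_MMLSPARK_CPU_IMAGE

# PySpark run configuration executed inside a Docker image that ships with Spark
docker_spark_config = RunConfiguration(framework="pyspark")
docker_spark_config.environment.docker.enabled = True
docker_spark_config.environment.docker.base_image = DEFAULT_MMLSPARK_CPU_IMAGE

src = ScriptRunConfig(source_directory='.',
                      script='train-spark.py',
                      run_config=docker_spark_config)
run = exp.submit(src)  # runs in local Docker unless a remote target is set
```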

View File

@@ -0,0 +1,97 @@
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.
import numpy as np
import pyspark
import os
import urllib
import sys
from pyspark.sql.functions import *
from pyspark.ml.classification import *
from pyspark.ml.evaluation import *
from pyspark.ml.feature import *
from pyspark.sql.types import StructType, StructField
from pyspark.sql.types import DoubleType, IntegerType, StringType
from azureml.core.run import Run
# get the Azure ML run context for metric logging
run = Run.get_context()
# start Spark session
spark = pyspark.sql.SparkSession.builder.appName('Iris').getOrCreate()
# print runtime versions
print('****************')
print('Python version: {}'.format(sys.version))
print('Spark version: {}'.format(spark.version))
print('****************')
# load iris.csv into Spark dataframe
schema = StructType([
StructField("sepal-length", DoubleType()),
StructField("sepal-width", DoubleType()),
StructField("petal-length", DoubleType()),
StructField("petal-width", DoubleType()),
StructField("class", StringType())
])
data = spark.read.format("com.databricks.spark.csv") \
.option("header", "true") \
.schema(schema) \
.load("iris.csv")
print("First 10 rows of Iris dataset:")
data.show(10)
# vectorize all numerical columns into a single feature column
feature_cols = data.columns[:-1]
assembler = pyspark.ml.feature.VectorAssembler(
inputCols=feature_cols, outputCol='features')
data = assembler.transform(data)
# convert text labels into indices
data = data.select(['features', 'class'])
label_indexer = pyspark.ml.feature.StringIndexer(
inputCol='class', outputCol='label').fit(data)
data = label_indexer.transform(data)
# only select the features and label column
data = data.select(['features', 'label'])
print("Reading for machine learning")
data.show(10)
# change regularization rate and you will likely get a different accuracy.
reg = 0.01
# load regularization rate from argument if present
if len(sys.argv) > 1:
reg = float(sys.argv[1])
# log regularization rate
run.log("Regularization Rate", reg)
# use Logistic Regression to train on the training set
train, test = data.randomSplit([0.70, 0.30])
lr = pyspark.ml.classification.LogisticRegression(regParam=reg)
model = lr.fit(train)
# predict on the test set
prediction = model.transform(test)
print("Prediction")
prediction.show(10)
# evaluate the accuracy of the model using the test set
evaluator = pyspark.ml.evaluation.MulticlassClassificationEvaluator(
metricName='accuracy')
accuracy = evaluator.evaluate(prediction)
print()
print('#####################################')
print('Regularization rate is {}'.format(reg))
print("Accuracy is {}".format(accuracy))
print('#####################################')
print()
# log accuracy
run.log('Accuracy', accuracy)
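Since `train-spark.py` reads an optional regularization rate from `sys.argv[1]`, the rate can be varied by passing script arguments at submission time. A sketch, assuming the `exp` and `hdi_run_config` objects from the notebook above:

```python
from azureml.core import ScriptRunConfig

# submit the same script with a non-default regularization rate (0.2)
src = ScriptRunConfig(source_directory='.',
                      script='train-spark.py',
                      arguments=['0.2'],
                      run_config=hdi_run_config)
run = exp.submit(src)
run.wait_for_completion(show_output=True)
```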

View File

@@ -31,7 +31,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Prerequisites\n", "## Prerequisites\n",
"Make sure you go through the [00.configuration.ipynb](https://github.com/Azure/MachineLearningNotebooks/blob/master/00.configuration.ipynb) Notebook first if you haven't." "Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't."
] ]
}, },
{ {
@@ -119,7 +119,7 @@
"\n", "\n",
"First lets check which VM families are available in your region. Azure is a regional service and some specialized SKUs (especially GPUs) are only available in certain regions. Since AmlCompute is created in the region of your workspace, we will use the supported_vms () function to see if the VM family we want to use ('STANDARD_D2_V2') is supported.\n", "First lets check which VM families are available in your region. Azure is a regional service and some specialized SKUs (especially GPUs) are only available in certain regions. Since AmlCompute is created in the region of your workspace, we will use the supported_vms () function to see if the VM family we want to use ('STANDARD_D2_V2') is supported.\n",
"\n", "\n",
"You can also pass a different region to check availability and then re-create your workspace in that region through the [00. Installation and Configuration](00.configuration.ipynb)" "You can also pass a different region to check availability and then re-create your workspace in that region through the [configuration notebook](../../../configuration.ipynb)"
] ]
}, },
{ {
@@ -214,6 +214,13 @@
"run" "run"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run)."
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
@@ -449,8 +456,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"#Get_status () gets the latest status of the AmlCompute target\n", "#Get_status () gets the latest status of the AmlCompute target\n",
"cpu_cluster.get_status()\n", "cpu_cluster.get_status().serialize()\n"
"cpu_cluster.serialize()"
] ]
}, },
{ {

View File

@@ -29,7 +29,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Prerequisites\n", "## Prerequisites\n",
"Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) Notebook first if you haven't." "Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't."
] ]
}, },
{ {
@@ -143,7 +143,7 @@
"run_config_user_managed.environment.python.user_managed_dependencies = True\n", "run_config_user_managed.environment.python.user_managed_dependencies = True\n",
"\n", "\n",
"# You can choose a specific Python environment by pointing to a Python path \n", "# You can choose a specific Python environment by pointing to a Python path \n",
"#run_config.environment.python.interpreter_path = '/home/johndoe/miniconda3/envs/sdk2/bin/python'" "#run_config.environment.python.interpreter_path = '/home/johndoe/miniconda3/envs/myenv/bin/python'"
] ]
}, },
{ {
@@ -182,6 +182,13 @@
"run" "run"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run)."
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -212,7 +219,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core.runconfig import RunConfiguration\n",
"from azureml.core.conda_dependencies import CondaDependencies\n", "from azureml.core.conda_dependencies import CondaDependencies\n",
"\n", "\n",
"run_config_system_managed = RunConfiguration()\n", "run_config_system_managed = RunConfiguration()\n",
@@ -281,6 +287,7 @@
"source": [ "source": [
"### Docker-based execution\n", "### Docker-based execution\n",
"**IMPORTANT**: You must have Docker engine installed locally in order to use this execution mode. If your kernel is already running in a Docker container, such as **Azure Notebooks**, this mode will **NOT** work.\n", "**IMPORTANT**: You must have Docker engine installed locally in order to use this execution mode. If your kernel is already running in a Docker container, such as **Azure Notebooks**, this mode will **NOT** work.\n",
"\n",
"NOTE: The GPU base image must be used on Microsoft Azure Services only such as ACI, AML Compute, Azure VMs, and AKS.\n", "NOTE: The GPU base image must be used on Microsoft Azure Services only such as ACI, AML Compute, Azure VMs, and AKS.\n",
"\n", "\n",
"You can also ask the system to pull down a Docker image and execute your scripts in it." "You can also ask the system to pull down a Docker image and execute your scripts in it."
@@ -296,6 +303,8 @@
"run_config_docker.environment.python.user_managed_dependencies = False\n", "run_config_docker.environment.python.user_managed_dependencies = False\n",
"run_config_docker.auto_prepare_environment = True\n", "run_config_docker.auto_prepare_environment = True\n",
"run_config_docker.environment.docker.enabled = True\n", "run_config_docker.environment.docker.enabled = True\n",
"\n",
"# use the default CPU-based Docker image from Azure ML\n",
"run_config_docker.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE\n", "run_config_docker.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE\n",
"\n", "\n",
"# Specify conda dependencies with scikit-learn\n", "# Specify conda dependencies with scikit-learn\n",
@@ -309,7 +318,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"Submit script to run in the system-managed environment\n", "### Submit script to run in the system-managed environment\n",
"A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 mninutes. But this conda environment is reused so long as you don't change the conda dependencies.\n", "A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 mninutes. But this conda environment is reused so long as you don't change the conda dependencies.\n",
"\n", "\n",
"\n" "\n"
@@ -353,6 +362,33 @@
"run.wait_for_completion(show_output=True)" "run.wait_for_completion(show_output=True)"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Use a custom Docker image\n",
"\n",
"You can also specify a custom Docker image if you don't want to use the default image provided by Azure ML.\n",
"\n",
"```python\n",
"# use an image available in Docker Hub without authentication\n",
"run_config_docker.environment.docker.base_image = \"continuumio/miniconda3\"\n",
"\n",
"# or, use an image available in a private Azure Container Registry\n",
"run_config_docker.environment.docker.base_image = \"mycustomimage:1.0\"\n",
"run_config_docker.environment.docker.base_image_registry.address = \"myregistry.azurecr.io\"\n",
"run_config_docker.environment.docker.base_image_registry.username = \"username\"\n",
"run_config_docker.environment.docker.base_image_registry.password = \"password\"\n",
"```\n",
"\n",
"When you are using a custom Docker image, you might already have your environment setup properly in a Python environment in the Docker image. In that case, you can skip specifying conda dependencies, and just use `user_managed_dependencies` option instead:\n",
"```python\n",
"run_config_docker.environment.python.user_managed_dependencies = True\n",
"# path to the Python environment in the custom Docker image\n",
"run_config.environment.python.interpreter_path = '/opt/conda/bin/python'\n",
"```"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -452,7 +488,7 @@
"metadata": { "metadata": {
"authors": [ "authors": [
{ {
"name": "roastala" "name": "haining"
} }
], ],
"kernelspec": { "kernelspec": {

View File

@@ -30,7 +30,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Prerequisites\n", "## Prerequisites\n",
"Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) Notebook first if you haven't." "Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't."
] ]
}, },
{ {
@@ -190,7 +190,7 @@
"source": [ "source": [
"## Create and Attach a DSVM as a compute target\n", "## Create and Attach a DSVM as a compute target\n",
"\n", "\n",
"**Note**: To streamline the compute that Azure Machine Learning creates, we are making updates to support creating only single to multi-node `AmlCompute`. The `DSVMCompute` class will be deprecated in a later release, but the DSVM can be created using the below single line command and then attached(like any VM) using the sample code below. Also note, that we only support Linux VMs for remote execution from AML and the commands below will spin a Linux VM only.\n", "**Note**: To streamline the compute that Azure Machine Learning creates, we are making updates to support creating only single to multi-node `AmlCompute`. The DSVM can be created using the below single line command and then attached(like any VM) using the sample code below. Also note, that we only support Linux VMs for remote execution from AML and the commands below will spin a Linux VM only.\n",
"\n", "\n",
"```shell\n", "```shell\n",
"# create a DSVM in your resource group\n", "# create a DSVM in your resource group\n",
@@ -209,9 +209,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core.compute import RemoteCompute\n", "from azureml.core.compute import ComputeTarget, RemoteCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n", "from azureml.core.compute_target import ComputeTargetException\n",
"import os\n",
"\n", "\n",
"username = os.getenv('AZUREML_DSVM_USERNAME', default='<my_username>')\n", "username = os.getenv('AZUREML_DSVM_USERNAME', default='<my_username>')\n",
"address = os.getenv('AZUREML_DSVM_ADDRESS', default='<ip_address_or_fqdn>')\n", "address = os.getenv('AZUREML_DSVM_ADDRESS', default='<ip_address_or_fqdn>')\n",
@@ -222,13 +221,13 @@
" attached_dsvm_compute = RemoteCompute(workspace=ws, name=compute_target_name)\n", " attached_dsvm_compute = RemoteCompute(workspace=ws, name=compute_target_name)\n",
" print('found existing:', attached_dsvm_compute.name)\n", " print('found existing:', attached_dsvm_compute.name)\n",
"except ComputeTargetException:\n", "except ComputeTargetException:\n",
" attached_dsvm_compute = RemoteCompute.attach(workspace=ws,\n", " attach_config = RemoteCompute.attach_configuration(address=address,\n",
" name=compute_target_name,\n",
" username=username,\n",
" address=address,\n",
" ssh_port=22,\n", " ssh_port=22,\n",
" username=username,\n",
" private_key_file='./.ssh/id_rsa')\n", " private_key_file='./.ssh/id_rsa')\n",
" \n", " attached_dsvm_compute = ComputeTarget.attach(workspace=ws,\n",
" name=compute_target_name,\n",
" attach_config=attach_config)\n",
" attached_dsvm_compute.wait_for_completion(show_output=True)" " attached_dsvm_compute.wait_for_completion(show_output=True)"
] ]
}, },
@@ -296,7 +295,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core import Run\n",
"from azureml.core import ScriptRunConfig\n", "from azureml.core import ScriptRunConfig\n",
"\n", "\n",
"src = ScriptRunConfig(source_directory=script_folder, \n", "src = ScriptRunConfig(source_directory=script_folder, \n",
@@ -308,6 +306,13 @@
"run = exp.submit(config=src)" "run = exp.submit(config=src)"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run)."
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
@@ -386,7 +391,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"You can choose to SSH into the VM and install Azure ML SDK, and any other missing dependencies, in that Python environment. For demonstration purposes, we simply are going to create another script `train2.py` that doesn't have azureml dependencies, and submit it instead." "You can choose to SSH into the VM and install Azure ML SDK, and any other missing dependencies, in that Python environment. For demonstration purposes, we simply are going to use another script `train2.py` that doesn't have azureml dependencies, and submit it instead."
] ]
}, },
{ {
@@ -395,11 +400,11 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"%%writefile $script_folder/train2.py\n", "# copy train2.py into the script folder\n",
"shutil.copy('./train2.py', os.path.join(script_folder, 'train2.py'))\n",
"\n", "\n",
"print('####################################')\n", "with open(os.path.join(script_folder, './train2.py'), 'r') as training_script:\n",
"print('Hello World (without Azure ML SDK)!')\n", " print(training_script.read())"
"print('####################################')"
] ]
}, },
{ {
@@ -452,10 +457,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core.runconfig import RunConfiguration\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"\n",
"# Load the \"cpu-dsvm.runconfig\" file (created by the above attach operation) in memory\n", "# Load the \"cpu-dsvm.runconfig\" file (created by the above attach operation) in memory\n",
"docker_run_config = RunConfiguration(framework=\"python\")\n", "docker_run_config = RunConfiguration(framework=\"python\")\n",
"\n", "\n",
@@ -507,6 +508,33 @@
"run.wait_for_completion(show_output=True)" "run.wait_for_completion(show_output=True)"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Use a custom Docker image instead\n",
"\n",
"You can also specify a custom Docker image if you don't want to use the default image provided by Azure ML.\n",
"\n",
"```python\n",
"# use an image available in Docker Hub without authentication\n",
"run_config_docker.environment.docker.base_image = \"continuumio/miniconda3\"\n",
"\n",
"# or, use an image available in a private Azure Container Registry\n",
"run_config_docker.environment.docker.base_image = \"mycustomimage:1.0\"\n",
"run_config_docker.environment.docker.base_image_registry.address = \"myregistry.azurecr.io\"\n",
"run_config_docker.environment.docker.base_image_registry.username = \"username\"\n",
"run_config_docker.environment.docker.base_image_registry.password = \"password\"\n",
"```\n",
"\n",
"When you are using a custom Docker image, you might already have your environment setup properly in a Python environment in the Docker image. In that case, you can skip specifying conda dependencies, and just use `user_managed_dependencies` option instead:\n",
"```python\n",
"run_config_docker.environment.python.user_managed_dependencies = True\n",
"# path to the Python environment in the custom Docker image\n",
"run_config.environment.python.interpreter_path = '/opt/conda/bin/python'\n",
"```"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},

View File

@@ -0,0 +1,6 @@
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.
print('####################################')
print('Hello World (without Azure ML SDK)!')
print('####################################')

View File

@@ -57,7 +57,7 @@
"---\n", "---\n",
"\n", "\n",
"## Setup\n", "## Setup\n",
"Make sure you have completed the [Configuration](..\\..\\configuration.ipnyb) notebook to set up your Azure Machine Learning workspace and ensure other common prerequisites are met. From the configuration, the important sections are the workspace configuration and ACI regristration.\n", "Make sure you have completed the [Configuration](../../../configuration.ipnyb) notebook to set up your Azure Machine Learning workspace and ensure other common prerequisites are met. From the configuration, the important sections are the workspace configuration and ACI regristration.\n",
"\n", "\n",
"We will also need the following libraries install to our conda environment. If these are not installed, use the following command to do so and restart the notebook.\n", "We will also need the following libraries install to our conda environment. If these are not installed, use the following command to do so and restart the notebook.\n",
"```shell\n", "```shell\n",
@@ -78,10 +78,10 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import azureml.core\n", "import azureml.core\n",
"from azureml.core import Experiment, Run, Workspace\n", "from azureml.core import Experiment, Workspace\n",
"\n", "\n",
"# Check core SDK version number\n", "# Check core SDK version number\n",
"print(\"This notebook was created using version 1.0.2 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.0.15 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")\n",
"print(\"\")\n", "print(\"\")\n",
"\n", "\n",
@@ -157,7 +157,8 @@
"experiment = Experiment(workspace=ws, name=\"train-within-notebook\")\n", "experiment = Experiment(workspace=ws, name=\"train-within-notebook\")\n",
"\n", "\n",
"# Create a run object in the experiment\n", "# Create a run object in the experiment\n",
"run = experiment.start_logging()# Log the algorithm parameter alpha to the run\n", "run = experiment.start_logging()\n",
"# Log the algorithm parameter alpha to the run\n",
"run.log('alpha', 0.03)\n", "run.log('alpha', 0.03)\n",
"\n", "\n",
"# Create, fit, and test the scikit-learn Ridge regression model\n", "# Create, fit, and test the scikit-learn Ridge regression model\n",
@@ -215,7 +216,6 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import numpy as np\n", "import numpy as np\n",
"import os\n",
"from tqdm import tqdm\n", "from tqdm import tqdm\n",
"\n", "\n",
"model_name = \"model.pkl\"\n", "model_name = \"model.pkl\"\n",
@@ -568,7 +568,6 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import requests\n", "import requests\n",
"import json\n",
"\n", "\n",
"# use the first row from the test set again\n", "# use the first row from the test set again\n",
"test_samples = json.dumps({\"data\": X_test[0:1, :].tolist()})\n", "test_samples = json.dumps({\"data\": X_test[0:1, :].tolist()})\n",
@@ -598,7 +597,6 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"%matplotlib inline\n", "%matplotlib inline\n",
"import matplotlib\n",
"import matplotlib.pyplot as plt\n", "import matplotlib.pyplot as plt\n",
"\n", "\n",
"f, (a0, a1) = plt.subplots(1, 2, gridspec_kw={'width_ratios':[3, 1], 'wspace':0, 'hspace': 0})\n", "f, (a0, a1) = plt.subplots(1, 2, gridspec_kw={'width_ratios':[3, 1], 'wspace':0, 'hspace': 0})\n",
@@ -607,13 +605,13 @@
"f.set_figheight(6)\n", "f.set_figheight(6)\n",
"f.set_figwidth(14)\n", "f.set_figwidth(14)\n",
"\n", "\n",
"a0.plot(residual, 'bo', alpha=0.4);\n", "a0.plot(residual, 'bo', alpha=0.4)\n",
"a0.plot([0,90], [0,0], 'r', lw=2)\n", "a0.plot([0,90], [0,0], 'r', lw=2)\n",
"a0.set_ylabel('residue values', fontsize=14)\n", "a0.set_ylabel('residue values', fontsize=14)\n",
"a0.set_xlabel('test data set', fontsize=14)\n", "a0.set_xlabel('test data set', fontsize=14)\n",
"\n", "\n",
"a1.hist(residual, orientation='horizontal', color='blue', bins=10, histtype='step');\n", "a1.hist(residual, orientation='horizontal', color='blue', bins=10, histtype='step')\n",
"a1.hist(residual, orientation='horizontal', color='blue', alpha=0.2, bins=10);\n", "a1.hist(residual, orientation='horizontal', color='blue', alpha=0.2, bins=10)\n",
"a1.set_yticklabels([])\n", "a1.set_yticklabels([])\n",
"\n", "\n",
"plt.show()" "plt.show()"
@@ -682,11 +680,11 @@
"metadata": { "metadata": {
"authors": [ "authors": [
{ {
"name": "roastala" "name": "haining"
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python [Python 3.6]", "display_name": "Python 3.6",
"language": "python", "language": "python",
"name": "python36" "name": "python36"
}, },
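The `start_logging` fix above is part of this notebook's interactive-run pattern. A compact sketch of that pattern, with the dataset and variable names assumed rather than copied from the notebook:

```python
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error

run = experiment.start_logging()           # create an interactive run
run.log('alpha', 0.03)                     # log the hyperparameter
model = Ridge(alpha=0.03).fit(X_train, y_train)
mse = mean_squared_error(y_test, model.predict(X_test))
run.log('mse', mse)                        # log the result
run.complete()                             # close the run
```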

View File

@@ -1,11 +1,20 @@
## Azure Machine Learning service Tutorial ## Azure Machine Learning service Tutorial
Complete these tutorials to learn how to train and deploy models using Azure Machine Learning services and Python SDK. These Notebooks accompany the [tutorial articles starting here]([https://docs.microsoft.com/en-us/azure/machine-learning/service/tutorial-train-models-with-aml]). Complete these tutorials to learn how to train and deploy models using Azure Machine Learning services and Python SDK. These Notebooks accompany the
two sets of tutorial articles for:
* [Image classification using MNIST dataset](https://docs.microsoft.com/en-us/azure/machine-learning/service/tutorial-train-models-with-aml)
* [Regression using NYC Taxi dataset](https://docs.microsoft.com/en-us/azure/machine-learning/service/tutorial-data-prep)
As a pre-requisite, run the [configuration notebook](../configuration.ipynb) first to set up your Azure ML Workspace. Then, run the notebooks in the following recommended order. As a pre-requisite, run the [configuration notebook](../configuration.ipynb) first to set up your Azure ML Workspace. Then, run the notebooks in the following recommended order.
* [Tutorial #1](img-classification-part1-training.ipynb): Train an image classification model with Azure Machine Learning ### Image classification
* [Tutorial #2](img-classification-part2-deploy.ipynb): Deploy an image classification model from first tutorial in Azure Container Instance (ACI)
* [Tutorial #3](regression-part1-data-prep.ipynb): Use data preparation. * [Part 1](img-classification-part1-training.ipynb): Train an image classification model with Azure Machine Learning.
* [Part 2](img-classification-part2-deploy.ipynb): Deploy an image classification model from first tutorial in Azure Container Instance (ACI).
### Regression
* [Part 1](regression-part1-data-prep.ipynb): Prepare the data using Azure Machine Learning Data Prep SDK.
* [Part 2](regression-part2-automated-ml.ipynb): Train a model using Automated Machine Learning.
Also find quickstarts and how-tos on the [official documentation site for Azure Machine Learning service](https://docs.microsoft.com/en-us/azure/machine-learning/service/). Also find quickstarts and how-tos on the [official documentation site for Azure Machine Learning service](https://docs.microsoft.com/en-us/azure/machine-learning/service/).

View File

@@ -66,11 +66,10 @@
"source": [ "source": [
"%matplotlib inline\n", "%matplotlib inline\n",
"import numpy as np\n", "import numpy as np\n",
"import matplotlib\n",
"import matplotlib.pyplot as plt\n", "import matplotlib.pyplot as plt\n",
"\n", "\n",
"import azureml\n", "import azureml.core\n",
"from azureml.core import Workspace, Run\n", "from azureml.core import Workspace\n",
"\n", "\n",
"# check core SDK version number\n", "# check core SDK version number\n",
"print(\"Azure ML SDK Version: \", azureml.core.VERSION)" "print(\"Azure ML SDK Version: \", azureml.core.VERSION)"
@@ -176,8 +175,8 @@
" # if no min node count is provided it will use the scale settings for the cluster\n", " # if no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n", " compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
" \n", " \n",
" # For a more detailed view of current AmlCompute status, use the 'status' property \n", " # For a more detailed view of current AmlCompute status, use get_status()\n",
" print(compute_target.status.serialize())" " print(compute_target.get_status().serialize())"
] ]
}, },
{ {
@@ -205,7 +204,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import os\n",
"import urllib.request\n", "import urllib.request\n",
"\n", "\n",
"os.makedirs('./data', exist_ok = True)\n", "os.makedirs('./data', exist_ok = True)\n",
@@ -354,7 +352,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import os\n",
"script_folder = './sklearn-mnist'\n", "script_folder = './sklearn-mnist'\n",
"os.makedirs(script_folder, exist_ok=True)" "os.makedirs(script_folder, exist_ok=True)"
] ]
@@ -573,6 +570,13 @@
"RunDetails(run).show()" "RunDetails(run).show()"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run)."
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -690,7 +694,7 @@
"metadata": { "metadata": {
"authors": [ "authors": [
{ {
"name": "roastala" "name": "haining"
} }
], ],
"kernelspec": { "kernelspec": {

View File

@@ -99,11 +99,9 @@
"source": [ "source": [
"%matplotlib inline\n", "%matplotlib inline\n",
"import numpy as np\n", "import numpy as np\n",
"import matplotlib\n",
"import matplotlib.pyplot as plt\n", "import matplotlib.pyplot as plt\n",
" \n", " \n",
"import azureml\n", "import azureml\n",
"from azureml.core import Workspace, Run\n",
"\n", "\n",
"# display the core SDK version number\n", "# display the core SDK version number\n",
"print(\"Azure ML SDK Version: \", azureml.core.VERSION)" "print(\"Azure ML SDK Version: \", azureml.core.VERSION)"
@@ -129,13 +127,9 @@
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core import Workspace\n",
"from azureml.core.model import Model\n",
"\n",
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"model=Model(ws, 'sklearn_mnist')\n", "model=Model(ws, 'sklearn_mnist')\n",
"model.download(target_dir='.', exist_ok=True)\n", "model.download(target_dir='.', exist_ok=True)\n",
"import os \n",
"# verify the downloaded model file\n", "# verify the downloaded model file\n",
"os.stat('./sklearn_mnist_model.pkl')" "os.stat('./sklearn_mnist_model.pkl')"
] ]
@@ -521,7 +515,6 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import requests\n", "import requests\n",
"import json\n",
"\n", "\n",
"# send a random row from the test set to score\n", "# send a random row from the test set to score\n",
"random_index = np.random.randint(0, len(X_test)-1)\n", "random_index = np.random.randint(0, len(X_test)-1)\n",
@@ -581,14 +574,14 @@
"> * Deploy the model to ACI\n", "> * Deploy the model to ACI\n",
"> * Test the deployed model\n", "> * Test the deployed model\n",
" \n", " \n",
"You can also try out the [Automatic algorithm selection tutorial](03.auto-train-models.ipynb) to see how Azure Machine Learning can auto-select and tune the best algorithm for your model and build that model for you." "You can also try out the [regression tutorial](regression-part1-data-prep.ipynb)."
] ]
} }
], ],
"metadata": { "metadata": {
"authors": [ "authors": [
{ {
"name": "roastala" "name": "haining"
} }
], ],
"kernelspec": { "kernelspec": {

View File

@@ -26,7 +26,7 @@
"> * Explore the results\n", "> * Explore the results\n",
"> * Register the best model\n", "> * Register the best model\n",
"\n", "\n",
"If you dont have an Azure subscription, create a [free account](https://aka.ms/AMLfree) before you begin. \n", "If you don\u00e2\u20ac\u2122t have an Azure subscription, create a [free account](https://aka.ms/AMLfree) before you begin. \n",
"\n", "\n",
"> Code in this article was tested with Azure Machine Learning SDK version 1.0.0\n", "> Code in this article was tested with Azure Machine Learning SDK version 1.0.0\n",
"\n", "\n",
@@ -55,8 +55,6 @@
"import azureml.core\n", "import azureml.core\n",
"import pandas as pd\n", "import pandas as pd\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.train.automl.run import AutoMLRun\n",
"import time\n",
"import logging" "import logging"
] ]
}, },
@@ -93,7 +91,8 @@
"output['Location'] = ws.location\n", "output['Location'] = ws.location\n",
"output['Project Directory'] = project_folder\n", "output['Project Directory'] = project_folder\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option('display.max_colwidth', -1)\n",
"pd.DataFrame(data=output, index=['']).T" "outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
] ]
}, },
{ {
@@ -112,7 +111,6 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import azureml.dataprep as dprep\n", "import azureml.dataprep as dprep\n",
"import os\n",
"\n", "\n",
"file_path = os.path.join(os.getcwd(), \"dflows.dprep\")\n", "file_path = os.path.join(os.getcwd(), \"dflows.dprep\")\n",
"\n", "\n",
@@ -308,7 +306,6 @@
" metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}\n", " metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}\n",
" metricslist[int(properties['iteration'])] = metrics\n", " metricslist[int(properties['iteration'])] = metrics\n",
"\n", "\n",
"import pandas as pd\n",
"rundata = pd.DataFrame(metricslist).sort_index(1)\n", "rundata = pd.DataFrame(metricslist).sort_index(1)\n",
"rundata" "rundata"
] ]
@@ -351,7 +348,7 @@
"description = 'Automated Machine Learning Model'\n", "description = 'Automated Machine Learning Model'\n",
"tags = None\n", "tags = None\n",
"local_run.register_model(description=description, tags=tags)\n", "local_run.register_model(description=description, tags=tags)\n",
"local_run.model_id # Use this id to deploy the model as a web service in Azure" "print(local_run.model_id) # Use this id to deploy the model as a web service in Azure"
] ]
}, },
{ {
@@ -471,7 +468,7 @@
"> * Explored and reviewed training results\n", "> * Explored and reviewed training results\n",
"> * Registered the best model\n", "> * Registered the best model\n",
"\n", "\n",
"[Deploy your model](02.deploy-models.ipynb) with Azure Machine Learning." "You can also try out the [image classification tutorial](img-classification-part1-training.ipynb)."
] ]
} }
], ],