update samples from Release-63 as a part of SDK release
@@ -65,7 +65,7 @@ Visit following repos to see projects contributed by Azure ML users:
- [UMass Amherst Student Samples](https://github.com/katiehouse3/microsoft-azure-ml-notebooks) - A number of end-to-end machine learning notebooks, including machine translation, image classification, and customer churn, created by students in the 696DS course at UMass Amherst.

## Data/Telemetry
This repository collects usage data and sends it to Mircosoft to help improve our products and services. Read Microsoft's [privacy statement to learn more](https://privacy.microsoft.com/en-US/privacystatement)
This repository collects usage data and sends it to Microsoft to help improve our products and services. Read Microsoft's [privacy statement to learn more](https://privacy.microsoft.com/en-US/privacystatement)

To opt out of tracking, please go to the raw markdown or .ipynb files and remove the following line of code:

@@ -103,7 +103,7 @@
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -4,7 +4,7 @@ Learn how to use Azure Machine Learning services for experimentation and model m

As a pre-requisite, run the [configuration notebook](../configuration.ipynb) first to set up your Azure ML Workspace. Then, run the notebooks in the following recommended order.

* [train-within-notebook](./training/train-within-notebook): Train a model hile tracking run history, and learn how to deploy the model as web service to Azure Container Instance.
* [train-within-notebook](./training/train-within-notebook): Train a model while tracking run history, and learn how to deploy the model as web service to Azure Container Instance.
* [train-on-local](./training/train-on-local): Learn how to submit a run to local computer and use Azure ML managed run configuration.
* [train-on-amlcompute](./training/train-on-amlcompute): Use a 1-n node Azure ML managed compute cluster for remote runs on Azure CPU or GPU infrastructure.
* [train-on-remote-vm](./training/train-on-remote-vm): Use Data Science Virtual Machine as a target for remote runs.

@@ -6,12 +6,12 @@ dependencies:
- python>=3.5.2,<3.6.8
- nb_conda
- matplotlib==2.1.0
- numpy~=1.16.0
- numpy~=1.18.0
- cython
- urllib3<1.24
- scipy==1.4.1
- scikit-learn>=0.19.0,<=0.20.3
- pandas>=0.22.0,<=0.23.4
- scikit-learn==0.22.1
- pandas==0.25.1
- py-xgboost<=0.90
- conda-forge::fbprophet==0.5
- holidays==0.9.11
@@ -20,12 +20,9 @@ dependencies:

- pip:
  # Required packages for AzureML execution, history, and data preparation.
  - azureml-defaults
  - azureml-train-automl
  - azureml-train
  - azureml-widgets
  - azureml-pipeline
  - pytorch-transformers==1.0.0
  - spacy==2.1.8
  - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.13.0/validated_win32_requirements.txt [--no-deps]

@@ -0,0 +1,28 @@
name: azure_automl
dependencies:
# The python interpreter version.
# Currently Azure ML only supports 3.5.2 and later.
- pip<=19.3.1
- python>=3.5.2,<3.6.8
- nb_conda
- matplotlib==2.1.0
- numpy~=1.18.0
- cython
- urllib3<1.24
- scipy==1.4.1
- scikit-learn==0.22.1
- pandas==0.25.1
- py-xgboost<=0.90
- conda-forge::fbprophet==0.5
- holidays==0.9.11
- pytorch::pytorch=1.4.0
- cudatoolkit=10.1.243

- pip:
  # Required packages for AzureML execution, history, and data preparation.
  - azureml-widgets
  - pytorch-transformers==1.0.0
  - spacy==2.1.8
  - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.13.0/validated_linux_requirements.txt [--no-deps]

@@ -7,12 +7,12 @@ dependencies:
- python>=3.5.2,<3.6.8
- nb_conda
- matplotlib==2.1.0
- numpy~=1.16.0
- numpy~=1.18.0
- cython
- urllib3<1.24
- scipy==1.4.1
- scikit-learn>=0.19.0,<=0.20.3
- pandas>=0.22.0,<=0.23.4
- scikit-learn==0.22.1
- pandas==0.25.1
- py-xgboost<=0.90
- conda-forge::fbprophet==0.5
- holidays==0.9.11
@@ -21,11 +21,8 @@ dependencies:

- pip:
  # Required packages for AzureML execution, history, and data preparation.
  - azureml-defaults
  - azureml-train-automl
  - azureml-train
  - azureml-widgets
  - azureml-pipeline
  - pytorch-transformers==1.0.0
  - spacy==2.1.8
  - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.13.0/validated_darwin_requirements.txt [--no-deps]

@@ -12,7 +12,7 @@ fi

if [ "$AUTOML_ENV_FILE" == "" ]
then
AUTOML_ENV_FILE="automl_env.yml"
AUTOML_ENV_FILE="automl_env_linux.yml"
fi

if [ ! -f $AUTOML_ENV_FILE ]; then

@@ -105,7 +105,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -93,7 +93,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -97,7 +97,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -151,6 +151,8 @@
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"num_nodes = 2\n",
"\n",
"# Choose a name for your cluster.\n",
"amlcompute_cluster_name = \"dnntext-cluster\"\n",
"\n",
@@ -163,7 +165,7 @@
"    # To use BERT (this is recommended for best performance), select a GPU such as \"STANDARD_NC6\" \n",
"    # or similar GPU option\n",
"    # available in your workspace\n",
"    max_nodes = 1)\n",
"    max_nodes = num_nodes)\n",
"    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -282,7 +284,7 @@
"automl_settings = {\n",
"    \"experiment_timeout_minutes\": 20,\n",
"    \"primary_metric\": 'accuracy',\n",
"    \"max_concurrent_iterations\": 4, \n",
"    \"max_concurrent_iterations\": num_nodes, \n",
"    \"max_cores_per_iteration\": -1,\n",
"    \"enable_dnn\": True,\n",
"    \"enable_early_stopping\": True,\n",

@@ -88,7 +88,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -0,0 +1,92 @@
# Experimental Notebooks for Automated ML
Notebooks listed in this folder are leveraging experimental features. Namespaces or function signatures may change in future SDK releases. The notebooks published here will reflect the latest supported APIs. All of these notebooks can run on a client-only installation of the Automated ML SDK.
The client-only installation doesn't contain any of the machine learning libraries, such as scikit-learn, xgboost, or tensorflow, making it much faster to install and less likely to conflict with any packages in an existing environment. However, since the ML libraries are not available locally, models cannot be downloaded and loaded directly in the client. To replace the functionality of having models locally, these notebooks also demonstrate the ModelProxy feature, which lets you submit a predict/forecast run to the training environment, as sketched below.
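As a minimal sketch (assuming `best_run` is a completed AutoML child run and `test_data` is a tabular dataset, as in the sample notebook in this folder), the ModelProxy flow looks like this:

```
# ModelProxy sketch: the predict call is submitted as a run in the training
# environment, so no ML libraries are needed on the local client.
from azureml.train.automl.model_proxy import ModelProxy

best_model_proxy = ModelProxy(best_run)             # wrap a completed AutoML run
predictions = best_model_proxy.predict(test_data)   # executes remotely as a run
predictions_df = predictions.to_pandas_dataframe()  # materialize results locally
```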

<a name="localconda"></a>
## Setup using a Local Conda environment

To run these notebooks on your own notebook server, use these installation instructions.
The instructions below will install everything you need and then start a Jupyter notebook.
If you would like to use a lighter-weight version of the client that does not install all of the machine learning libraries locally, you can leverage the [experimental notebooks.](experimental/README.md)

### 1. Install mini-conda from [here](https://conda.io/miniconda.html), choose 64-bit Python 3.7 or higher.
- **Note**: if you already have conda installed, you can keep using it but it should be version 4.4.10 or later (as shown by: conda -V). If you have a previous version installed, you can update it using the command: conda update conda.
There's no need to install mini-conda specifically.

### 2. Downloading the sample notebooks
- Download the sample notebooks from [GitHub](https://github.com/Azure/MachineLearningNotebooks) as zip and extract the contents to a local directory. The automated ML sample notebooks are in the "automated-machine-learning" folder.

### 3. Set up a new conda environment
The **automl_setup** script creates a new conda environment, installs the necessary packages, configures the widget, and starts a Jupyter notebook. It takes the conda environment name as an optional parameter. The default conda environment name is azure_automl. The exact command depends on the operating system. See the specific sections below for Windows, Mac and Linux. It can take about 10 minutes to execute.

Packages installed by the **automl_setup** script:
<ul><li>python</li><li>nb_conda</li><li>matplotlib</li><li>numpy</li><li>cython</li><li>urllib3</li><li>pandas</li><li>azureml-sdk</li><li>azureml-widgets</li><li>pandas-ml</li></ul>

For more details refer to the [automl_env.yml](./automl_env.yml)
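For example, to create the environment under a custom name (the name here is illustrative), pass it as the first argument: `automl_setup my_automl_env` on Windows, or `bash automl_setup_linux.sh my_automl_env` on Linux.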

## Windows
Start an **Anaconda Prompt** window, cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:
```
automl_setup
```
## Mac
Install "Command line developer tools" if it is not already installed (you can use the command: `xcode-select --install`).

Start a Terminal window, cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:

```
bash automl_setup_mac.sh
```

## Linux
cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:

```
bash automl_setup_linux.sh
```

### 4. Running configuration.ipynb
- Before running any samples you first need to run the configuration notebook. Click on [configuration](../../configuration.ipynb) notebook
- Execute the cells in the notebook to Register Machine Learning Services Resource Provider and create a workspace. (*instructions in notebook*)

### 5. Running Samples
- Please make sure you use the Python [conda env:azure_automl] kernel when trying the sample Notebooks.
- Follow the instructions in the individual notebooks to explore various features in automated ML.

### 6. Starting jupyter notebook manually
To start your Jupyter notebook manually, use:

```
conda activate azure_automl
jupyter notebook
```

or on Mac or Linux:

```
source activate azure_automl
jupyter notebook
```


<a name="samples"></a>
# Automated ML SDK Sample Notebooks

- [auto-ml-regression.ipynb](regression/auto-ml-regression.ipynb)
  - Dataset: Hardware Performance Dataset
  - Simple example of using automated ML for regression
  - Uses azure compute for training
  - Uses ModelProxy for submitting prediction to training environment on azure compute

<a name="documentation"></a>
See [Configure automated machine learning experiments](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-auto-train) to learn more about the settings and features available for automated machine learning experiments.

<a name="pythoncommand"></a>
# Running using python command
Jupyter notebook provides a File / Download as / Python (.py) option for saving the notebook as a Python file.
You can then run this file using the python command.
However, on Windows the file needs to be modified before it can be run.
The following condition must be added to the main code in the file:

    if __name__ == "__main__":

The main code of the file must be indented so that it is under this condition.
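For instance, an exported notebook would be restructured along these lines (a minimal sketch; `main` and its body stand in for the notebook's actual code):

```
# On Windows the exported script must run under a main guard, typically because
# AutoML uses multiprocessing, which re-imports the entry script in child processes.
def main():
    ...  # the notebook's code, moved (or indented) under the guard


if __name__ == "__main__":
    main()
```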
@@ -0,0 +1,20 @@
name: azure_automl_experimental
dependencies:
# The python interpreter version.
# Currently Azure ML only supports 3.5.2 and later.
- pip<=19.3.1
- python>=3.5.2,<3.8
- nb_conda
- matplotlib==2.1.0
- numpy~=1.18.0
- cython
- urllib3<1.24
- scikit-learn==0.22.1
- pandas==0.25.1

- pip:
  # Required packages for AzureML execution, history, and data preparation.
  - azureml-defaults
  - azureml-sdk
  - azureml-widgets
  - azureml-explain-model
@@ -0,0 +1,21 @@
name: azure_automl_experimental
dependencies:
# The python interpreter version.
# Currently Azure ML only supports 3.5.2 and later.
- pip<=19.3.1
- nomkl
- python>=3.5.2,<3.8
- nb_conda
- matplotlib==2.1.0
- numpy~=1.18.0
- cython
- urllib3<1.24
- scikit-learn==0.22.1
- pandas==0.25.1

- pip:
  # Required packages for AzureML execution, history, and data preparation.
  - azureml-defaults
  - azureml-sdk
  - azureml-widgets
  - azureml-explain-model
@@ -0,0 +1,63 @@
@echo off
set conda_env_name=%1
set automl_env_file=%2
set options=%3
set PIP_NO_WARN_SCRIPT_LOCATION=0

IF "%conda_env_name%"=="" SET conda_env_name="azure_automl_experimental"
IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml"

IF NOT EXIST %automl_env_file% GOTO YmlMissing

IF "%CONDA_EXE%"=="" GOTO CondaMissing

call conda activate %conda_env_name% 2>nul:

if not errorlevel 1 (
  echo Upgrading existing conda environment %conda_env_name%
  call pip uninstall azureml-train-automl -y -q
  call conda env update --name %conda_env_name% --file %automl_env_file%
  if errorlevel 1 goto ErrorExit
) else (
  call conda env create -f %automl_env_file% -n %conda_env_name%
)

call conda activate %conda_env_name% 2>nul:
if errorlevel 1 goto ErrorExit

call python -m ipykernel install --user --name %conda_env_name% --display-name "Python (%conda_env_name%)"

REM azureml.widgets is now installed as part of the pip install under the conda env.
REM Removing the old user install so that the notebooks will use the latest widget.
call jupyter nbextension uninstall --user --py azureml.widgets

echo.
echo.
echo ***************************************
echo * AutoML setup completed successfully *
echo ***************************************
IF NOT "%options%"=="nolaunch" (
  echo.
  echo Starting jupyter notebook - please run the configuration notebook
  echo.
  jupyter notebook --log-level=50 --notebook-dir='..\..'
)

goto End

:CondaMissing
echo Please run this script from an Anaconda Prompt window.
echo You can start an Anaconda Prompt window by
echo typing Anaconda Prompt on the Start menu.
echo If you don't see the Anaconda Prompt app, install Miniconda.
echo If you are running an older version of Miniconda or Anaconda,
echo you can upgrade using the command: conda update conda
goto End

:YmlMissing
echo File %automl_env_file% not found.

:ErrorExit
echo Install failed

:End
@@ -0,0 +1,53 @@
#!/bin/bash

CONDA_ENV_NAME=$1
AUTOML_ENV_FILE=$2
OPTIONS=$3
PIP_NO_WARN_SCRIPT_LOCATION=0

if [ "$CONDA_ENV_NAME" == "" ]
then
  CONDA_ENV_NAME="azure_automl_experimental"
fi

if [ "$AUTOML_ENV_FILE" == "" ]
then
  AUTOML_ENV_FILE="automl_env.yml"
fi

if [ ! -f $AUTOML_ENV_FILE ]; then
  echo "File $AUTOML_ENV_FILE not found"
  exit 1
fi

if source activate $CONDA_ENV_NAME 2> /dev/null
then
  echo "Upgrading existing conda environment" $CONDA_ENV_NAME
  pip uninstall azureml-train-automl -y -q
  conda env update --name $CONDA_ENV_NAME --file $AUTOML_ENV_FILE &&
  jupyter nbextension uninstall --user --py azureml.widgets
else
  conda env create -f $AUTOML_ENV_FILE -n $CONDA_ENV_NAME &&
  source activate $CONDA_ENV_NAME &&
  python -m ipykernel install --user --name $CONDA_ENV_NAME --display-name "Python ($CONDA_ENV_NAME)" &&
  jupyter nbextension uninstall --user --py azureml.widgets &&
  echo "" &&
  echo "" &&
  echo "***************************************" &&
  echo "* AutoML setup completed successfully *" &&
  echo "***************************************" &&
  if [ "$OPTIONS" != "nolaunch" ]
  then
    echo "" &&
    echo "Starting jupyter notebook - please run the configuration notebook" &&
    echo "" &&
    jupyter notebook --log-level=50 --notebook-dir '../..'
  fi
fi

if [ $? -gt 0 ]
then
  echo "Installation failed"
fi

@@ -0,0 +1,55 @@
#!/bin/bash

CONDA_ENV_NAME=$1
AUTOML_ENV_FILE=$2
OPTIONS=$3
PIP_NO_WARN_SCRIPT_LOCATION=0

if [ "$CONDA_ENV_NAME" == "" ]
then
  CONDA_ENV_NAME="azure_automl_experimental"
fi

if [ "$AUTOML_ENV_FILE" == "" ]
then
  AUTOML_ENV_FILE="automl_env.yml"
fi

if [ ! -f $AUTOML_ENV_FILE ]; then
  echo "File $AUTOML_ENV_FILE not found"
  exit 1
fi

if source activate $CONDA_ENV_NAME 2> /dev/null
then
  echo "Upgrading existing conda environment" $CONDA_ENV_NAME
  pip uninstall azureml-train-automl -y -q
  conda env update --name $CONDA_ENV_NAME --file $AUTOML_ENV_FILE &&
  jupyter nbextension uninstall --user --py azureml.widgets
else
  conda env create -f $AUTOML_ENV_FILE -n $CONDA_ENV_NAME &&
  source activate $CONDA_ENV_NAME &&
  conda install lightgbm -c conda-forge -y &&
  python -m ipykernel install --user --name $CONDA_ENV_NAME --display-name "Python ($CONDA_ENV_NAME)" &&
  jupyter nbextension uninstall --user --py azureml.widgets &&
  echo "" &&
  echo "" &&
  echo "***************************************" &&
  echo "* AutoML setup completed successfully *" &&
  echo "***************************************" &&
  if [ "$OPTIONS" != "nolaunch" ]
  then
    echo "" &&
    echo "Starting jupyter notebook - please run the configuration notebook" &&
    echo "" &&
    jupyter notebook --log-level=50 --notebook-dir '../..'
  fi
fi

if [ $? -gt 0 ]
then
  echo "Installation failed"
fi

@@ -0,0 +1,481 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Regression with Aml Compute**_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Data](#Data)\n",
"1. [Train](#Train)\n",
"1. [Results](#Results)\n",
"1. [Test](#Test)\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"In this example we use the Hardware Performance Dataset to showcase how you can use AutoML for a simple regression problem. The Regression goal is to predict the performance of certain combinations of hardware parts.\n",
"\n",
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
"\n",
"In this notebook you will learn how to:\n",
"1. Create an `Experiment` in an existing `Workspace`.\n",
"2. Configure AutoML using `AutoMLConfig`.\n",
"3. Train the model using remote compute.\n",
"4. Explore the results.\n",
"5. Test the best fitted model."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"from matplotlib import pyplot as plt\n",
"import numpy as np\n",
"import pandas as pd\n",
" \n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.dataset import Dataset\n",
"from azureml.train.automl import AutoMLConfig"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# Choose a name for the experiment.\n",
"experiment_name = 'automl-regression-model-proxy'\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using AmlCompute\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"cpu_cluster_name = \"reg-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
"    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
"    print('Found existing cluster, use it.')\n",
"except ComputeTargetException:\n",
"    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
"                                                           max_nodes=4)\n",
"    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load Data\n",
"Load the hardware dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/machineData.csv\"\n",
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
"\n",
"# Split the dataset into train and test datasets\n",
"train_data, test_data = dataset.random_split(percentage=0.8, seed=223)\n",
"\n",
"label = \"ERP\"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|classification, regression or forecasting|\n",
"|**primary_metric**|This is the metric that you want to optimize. Regression supports the following primary metrics: <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**training_data**|(sparse) array-like, shape = [n_samples, n_features]|\n",
"|**label_column_name**|(sparse) array-like, shape = [n_samples, ], targets values.|\n",
"\n",
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"automlconfig-remarks-sample"
]
},
"outputs": [],
"source": [
"automl_settings = {\n",
"    \"n_cross_validations\": 3,\n",
"    \"primary_metric\": 'r2_score',\n",
"    \"enable_early_stopping\": True, \n",
"    \"experiment_timeout_hours\": 0.3, # for real scenarios we recommend a timeout of at least one hour \n",
"    \"max_concurrent_iterations\": 4,\n",
"    \"max_cores_per_iteration\": -1,\n",
"    \"verbosity\": logging.INFO,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(task = 'regression',\n",
"                             compute_target = compute_target,\n",
"                             training_data = train_data,\n",
"                             label_column_name = label,\n",
"                             **automl_settings\n",
"                            )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call the `submit` method on the experiment object and pass the run configuration. Execution of remote runs is asynchronous. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run = experiment.submit(automl_config, show_output = False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# If you need to retrieve a run that already started, use the following code\n",
"#from azureml.train.automl.run import AutoMLRun\n",
"#remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Widget for Monitoring Runs\n",
"\n",
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
"\n",
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(remote_run).show() "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run.wait_for_completion()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Child Run\n",
"\n",
"Below we select the best pipeline from our iterations. The `get_best_child` method returns the best run. Overloads on `get_best_child` allow you to retrieve the best run for *any* logged metric."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run = remote_run.get_best_child()\n",
"print(best_run)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Best Child Run Based on Any Other Metric\n",
"Show the run and the model that has the smallest `root_mean_squared_error` value (which turned out to be the same as the one with largest `spearman_correlation` value):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"lookup_metric = \"root_mean_squared_error\"\n",
"best_run = remote_run.get_best_child(metric = lookup_metric)\n",
"print(best_run)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# preview the first 3 rows of the dataset\n",
"\n",
"test_data = test_data.to_pandas_dataframe()\n",
"y_test = test_data['ERP'].fillna(0)\n",
"test_data = test_data.drop('ERP', 1)\n",
"test_data = test_data.fillna(0)\n",
"\n",
"\n",
"train_data = train_data.to_pandas_dataframe()\n",
"y_train = train_data['ERP'].fillna(0)\n",
"train_data = train_data.drop('ERP', 1)\n",
"train_data = train_data.fillna(0)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Creating ModelProxy for submitting prediction runs to the training environment.\n",
"We will create a ModelProxy for the best child run, which will allow us to submit a run that does the prediction in the training environment. Unlike the local client, which can have different versions of some libraries, the training environment will have all the compatible libraries for the model already."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.automl.model_proxy import ModelProxy\n",
"best_model_proxy = ModelProxy(best_run)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"y_pred_train = best_model_proxy.predict(train_data).to_pandas_dataframe()\n",
"y_residual_train = y_train - y_pred_train\n",
"\n",
"y_pred_test = best_model_proxy.predict(test_data).to_pandas_dataframe()\n",
"y_residual_test = y_test - y_pred_test"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"from sklearn.metrics import mean_squared_error, r2_score\n",
"\n",
"# Set up a multi-plot chart.\n",
"f, (a0, a1) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[1, 1], 'wspace':0, 'hspace': 0})\n",
"f.suptitle('Regression Residual Values', fontsize = 18)\n",
"f.set_figheight(6)\n",
"f.set_figwidth(16)\n",
"\n",
"# Plot residual values of training set.\n",
"a0.axis([0, 360, -100, 100])\n",
"a0.plot(y_residual_train, 'bo', alpha = 0.5)\n",
"a0.plot([-10,360],[0,0], 'r-', lw = 3)\n",
"a0.text(16,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_train, y_pred_train))), fontsize = 12)\n",
"a0.text(16,140,'R2 score = {0:.2f}'.format(r2_score(y_train, y_pred_train)),fontsize = 12)\n",
"a0.set_xlabel('Training samples', fontsize = 12)\n",
"a0.set_ylabel('Residual Values', fontsize = 12)\n",
"\n",
"# Plot residual values of test set.\n",
"a1.axis([0, 90, -100, 100])\n",
"a1.plot(y_residual_test, 'bo', alpha = 0.5)\n",
"a1.plot([-10,360],[0,0], 'r-', lw = 3)\n",
"a1.text(5,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred_test))), fontsize = 12)\n",
"a1.text(5,140,'R2 score = {0:.2f}'.format(r2_score(y_test, y_pred_test)),fontsize = 12)\n",
"a1.set_xlabel('Test samples', fontsize = 12)\n",
"a1.set_yticklabels([])\n",
"\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"test_pred = plt.scatter(y_test, y_pred_test, color='b')\n",
"test_test = plt.scatter(y_test, y_test, color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"authors": [
{
"name": "rakellam"
}
],
"categories": [
"how-to-use-azureml",
"automated-machine-learning"
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,4 @@
name: auto-ml-regression-model-proxy
dependencies:
- pip:
  - azureml-sdk
@@ -114,7 +114,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -87,7 +87,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -97,7 +97,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -94,7 +94,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -82,7 +82,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -327,7 +327,7 @@
"\n",
"The featurization customization in forecasting is an advanced feature in AutoML which allows our customers to change the default forecasting featurization behaviors and column types through `FeaturizationConfig`. The supported scenarios include,\n",
"1. Column purposes update: Override feature type for the specified column. Currently supports DateTime, Categorical and Numeric. This customization can be used in the scenario that the type of the column cannot correctly reflect its purpose. Some numerical columns, for instance, can be treated as Categorical columns which need to be converted to categorical while some can be treated as epoch timestamp which need to be converted to datetime. To tell our SDK to correctly preprocess these columns, a configuration need to be add with the columns and their desired types.\n",
"2. Transformer parameters update: Currently supports parameter change for Imputer only. User can customize imputation methods, the supported methods are constant for target data and mean, median, most frequent and constant for training data. This customization can be used for the scenario that our customers know which imputation methods fit best to the input data. For instance, some datasets use NaN to represent 0 which the correct behavior should impute all the missing value with 0. To achieve this behavior, these columns need to be configured as constant imputation with `fill_value` 0.\n",
"2. Transformer parameters update: Currently supports parameter change for Imputer only. User can customize imputation methods. The supported imputing methods for target column are constant and ffill (forward fill). The supported imputing methods for feature columns are mean, median, most frequent, constant and ffill (forward fill). This customization can be used for the scenario that our customers know which imputation methods fit best to the input data. For instance, some datasets use NaN to represent 0 which the correct behavior should impute all the missing value with 0. To achieve this behavior, these columns need to be configured as constant imputation with `fill_value` 0.\n",
"3. Drop columns: Columns to drop from being featurized. These usually are the columns which are leaky or the columns contain no useful data.\n",
"\n",
"This step requires an Enterprise workspace to gain access to this feature. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page.](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade)"
@@ -350,7 +350,9 @@
"# Fill missing values in the target column, Quantity, with zeros.\n",
"featurization_config.add_transformer_params('Imputer', ['Quantity'], {\"strategy\": \"constant\", \"fill_value\": 0})\n",
"# Fill missing values in the INCOME column with median value.\n",
"featurization_config.add_transformer_params('Imputer', ['INCOME'], {\"strategy\": \"median\"})"
"featurization_config.add_transformer_params('Imputer', ['INCOME'], {\"strategy\": \"median\"})\n",
"# Fill missing values in the Price column with forward fill (last value carried forward).\n",
"featurization_config.add_transformer_params('Imputer', ['Price'], {\"strategy\": \"ffill\"})"
]
},
{

@@ -96,7 +96,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -562,16 +562,10 @@
"outputs": [],
"source": [
"%%writefile score.py\n",
"import numpy as np\n",
"import pandas as pd\n",
"import os\n",
"import pickle\n",
"import azureml.train.automl\n",
"import azureml.interpret\n",
"from azureml.train.automl.runtime.automl_explain_utilities import AutoMLExplainerSetupClass, \\\n",
"    automl_setup_model_explanations\n",
"import joblib\n",
"import pandas as pd\n",
"from azureml.core.model import Model\n",
"from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations\n",
"\n",
"\n",
"def init():\n",

@@ -98,7 +98,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -1,14 +1,7 @@
import json
import numpy as np
import pandas as pd
import os
import pickle
import azureml.train.automl
import azureml.interpret
from azureml.train.automl.runtime.automl_explain_utilities import AutoMLExplainerSetupClass, \
    automl_setup_model_explanations
import joblib
from azureml.core.model import Model
from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations


def init():

@@ -1,17 +1,17 @@
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.
import os
import joblib

from azureml.core.run import Run
from interpret.ext.glassbox import LGBMExplainableModel
from automl.client.core.common.constants import MODEL_PATH
from azureml.core.experiment import Experiment
from azureml.core.dataset import Dataset
from azureml.train.automl.runtime.automl_explain_utilities import AutoMLExplainerSetupClass, \
    automl_setup_model_explanations, automl_check_model_if_explainable
from interpret.ext.glassbox import LGBMExplainableModel
from azureml.core.run import Run
from azureml.interpret.mimic_wrapper import MimicWrapper
from automl.client.core.common.constants import MODEL_PATH
from azureml.interpret.scoring.scoring_explainer import TreeScoringExplainer
import joblib
from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations, \
    automl_check_model_if_explainable


OUTPUT_DIR = './outputs/'

@@ -92,7 +92,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -334,14 +334,27 @@
"metadata": {},
"outputs": [],
"source": [
"# Use the default configuration (can also provide parameters to customize)\n",
"prov_config = AksCompute.provisioning_configuration()\n",
"from azureml.core.compute import ComputeTarget\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your AKS cluster\n",
"aks_name = 'my-aks-9' \n",
"# Create the cluster\n",
"aks_target = ComputeTarget.create(workspace = ws, \n",
"                                  name = aks_name, \n",
"                                  provisioning_configuration = prov_config)"
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
"    aks_target = ComputeTarget(workspace=ws, name=aks_name)\n",
"    print('Found existing cluster, use it.')\n",
"except ComputeTargetException:\n",
"    # Use the default configuration (can also provide parameters to customize)\n",
"    prov_config = AksCompute.provisioning_configuration()\n",
"\n",
"    # Create the cluster\n",
"    aks_target = ComputeTarget.create(workspace = ws, \n",
"                                      name = aks_name, \n",
"                                      provisioning_configuration = prov_config)\n",
"\n",
"if aks_target.get_status() != \"Succeeded\":\n",
"    aks_target.wait_for_completion(show_output=True)"
]
},
{

@@ -29,8 +29,8 @@ print("Argument 2(output final transformed taxi data): %s" % args.output_transfo
# use the drop_columns() function to delete the original fields as the newly generated features are preferred.
# Rename the rest of the fields to use meaningful descriptions.

normalized_df = normalized_df.astype({"pickup_date": 'datetime64', "dropoff_date": 'datetime64',
                                      "pickup_time": 'datetime64', "dropoff_time": 'datetime64',
normalized_df = normalized_df.astype({"pickup_date": 'datetime64[ns]', "dropoff_date": 'datetime64[ns]',
                                      "pickup_time": 'datetime64[us]', "dropoff_time": 'datetime64[us]',
                                      "distance": 'float64', "cost": 'float64'})

normalized_df["pickup_weekday"] = normalized_df["pickup_date"].dt.dayofweek

@@ -418,7 +418,7 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.hyperdrive.runconfig import HyperDriveRunConfig\n",
"from azureml.train.hyperdrive.runconfig import HyperDriveConfig\n",
"from azureml.train.hyperdrive.sampling import RandomParameterSampling\n",
"from azureml.train.hyperdrive.run import PrimaryMetricGoal\n",
"from azureml.train.hyperdrive.parameter_expressions import choice\n",
@@ -430,12 +430,12 @@
"    }\n",
")\n",
"\n",
"hyperdrive_run_config = HyperDriveRunConfig(estimator=estimator,\n",
"                                            hyperparameter_sampling=param_sampling, \n",
"                                            primary_metric_name='Accuracy',\n",
"                                            primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,\n",
"                                            max_total_runs=12,\n",
"                                            max_concurrent_runs=4)"
"hyperdrive_config = HyperDriveConfig(estimator=estimator,\n",
"                                     hyperparameter_sampling=param_sampling, \n",
"                                     primary_metric_name='Accuracy',\n",
"                                     primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,\n",
"                                     max_total_runs=12,\n",
"                                     max_concurrent_runs=4)"
]
},
{
@@ -452,7 +452,7 @@
"outputs": [],
"source": [
"# start the HyperDrive run\n",
"hyperdrive_run = experiment.submit(hyperdrive_run_config)"
"hyperdrive_run = experiment.submit(hyperdrive_config)"
]
},
{

@@ -158,7 +158,7 @@
"source": [
"from azureml.core import Dataset\n",
"\n",
"web_paths = ['http://mattmahoney.net/dc/text8.zip']\n",
"web_paths = ['https://azureopendatastorage.blob.core.windows.net/testpublic/text8.zip']\n",
"dataset = Dataset.File.from_files(path=web_paths)"
]
},

@@ -1,31 +0,0 @@
# imports
import pickle
from datetime import datetime
from azureml.opendatasets import NoaaIsdWeather
from sklearn.linear_model import LinearRegression

# get weather dataset
start = datetime(2019, 1, 1)
end = datetime(2019, 1, 14)
isd = NoaaIsdWeather(start, end)

# convert to pandas dataframe and filter down
df = isd.to_pandas_dataframe().fillna(0)
df = df[df['stationName'].str.contains('FLORIDA', regex=True, na=False)]

# features for training
X_features = ['latitude', 'longitude', 'temperature', 'windAngle', 'windSpeed']
y_features = ['elevation']

# write the training dataset to csv
training_dataset = df[X_features + y_features]
training_dataset.to_csv('training.csv', index=False)

# train the model
X = training_dataset[X_features]
y = training_dataset[y_features]
model = LinearRegression().fit(X, y)

# save the model as a .pkl file
with open('elevation-regression-model.pkl', 'wb') as f:
    pickle.dump(model, f)
@@ -1,346 +0,0 @@
latitude,longitude,temperature,windAngle,windSpeed,elevation
26.536,-81.755,17.8,10.0,2.1,9.0
26.536,-81.755,16.7,360.0,1.5,9.0
26.536,-81.755,16.1,350.0,1.5,9.0
[... remaining rows of the deleted training.csv (NOAA ISD weather training data); the diff view is truncated here ...]
|
||||
-29.916,-71.2,20.0,270.0,3.6,147.0
|
||||
-29.916,-71.2,20.0,270.0,5.7,147.0
|
||||
-29.917,-71.2,20.0,280.0,6.2,146.0
|
||||
-29.916,-71.2,20.0,280.0,6.2,147.0
|
||||
-29.916,-71.2,21.0,290.0,6.7,147.0
|
||||
-29.916,-71.2,20.0,270.0,6.2,147.0
|
||||
-29.917,-71.2,21.0,260.0,6.7,146.0
|
||||
-29.916,-71.2,21.0,260.0,6.7,147.0
|
||||
-29.916,-71.2,20.0,270.0,6.2,147.0
|
||||
-29.916,-71.2,19.0,260.0,5.1,147.0
|
||||
-29.916,-71.2,18.0,280.0,4.6,147.0
|
||||
-29.917,-71.2,17.5,280.0,3.1,146.0
|
||||
-29.916,-71.2,18.0,280.0,3.1,147.0
|
||||
30.349,-85.788,11.1,0.0,0.0,21.0
|
||||
30.349,-85.788,11.1,0.0,0.0,21.0
|
||||
30.349,-85.788,9.4,0.0,0.0,21.0
|
||||
30.349,-85.788,9.4,0.0,0.0,21.0
|
||||
30.349,-85.788,8.3,300.0,2.1,21.0
|
||||
30.349,-85.788,11.1,280.0,1.5,21.0
|
||||
30.349,-85.788,0.0,0.0,0.0,21.0
|
||||
30.349,-85.788,10.6,320.0,3.1,21.0
|
||||
30.349,-85.788,9.4,310.0,3.1,21.0
|
||||
30.349,-85.788,7.8,320.0,2.6,21.0
|
||||
30.349,-85.788,6.1,340.0,2.1,21.0
|
||||
30.349,-85.788,6.7,330.0,2.6,21.0
|
||||
30.349,-85.788,6.1,310.0,1.5,21.0
|
||||
30.349,-85.788,7.2,310.0,2.1,21.0
|
||||
30.349,-85.788,12.8,360.0,3.1,21.0
|
||||
30.349,-85.788,15.0,0.0,3.1,21.0
|
||||
30.349,-85.788,16.7,20.0,4.6,21.0
|
||||
30.349,-85.788,18.9,30.0,5.1,21.0
|
||||
30.349,-85.788,19.4,10.0,4.1,21.0
|
||||
30.349,-85.788,21.1,330.0,2.6,21.0
|
||||
30.349,-85.788,21.1,10.0,4.6,21.0
|
||||
30.349,-85.788,21.7,360.0,4.1,21.0
|
||||
30.349,-85.788,21.7,30.0,2.1,21.0
|
||||
30.349,-85.788,21.7,330.0,2.6,21.0
|
||||
30.349,-85.788,16.1,350.0,2.1,21.0
|
||||
30.349,-85.788,11.7,0.0,0.0,21.0
|
||||
30.349,-85.788,8.9,0.0,0.0,21.0
|
||||
30.349,-85.788,9.4,0.0,0.0,21.0
|
||||
30.349,-85.788,7.8,0.0,0.0,21.0
|
||||
30.349,-85.788,11.1,30.0,3.1,21.0
|
||||
30.349,-85.788,7.2,0.0,0.0,21.0
|
||||
30.349,-85.788,7.2,0.0,0.0,21.0
|
||||
30.349,-85.788,0.0,0.0,0.0,21.0
|
||||
30.349,-85.788,7.8,30.0,2.1,21.0
|
||||
30.349,-85.788,8.3,40.0,2.6,21.0
|
||||
30.349,-85.788,7.2,50.0,1.5,21.0
|
||||
30.349,-85.788,8.3,60.0,1.5,21.0
|
||||
30.349,-85.788,5.6,40.0,2.1,21.0
|
||||
30.349,-85.788,6.7,40.0,2.1,21.0
|
||||
30.349,-85.788,7.8,50.0,3.1,21.0
|
||||
30.349,-85.788,11.7,70.0,2.6,21.0
|
||||
30.349,-85.788,15.6,70.0,3.1,21.0
|
||||
30.349,-85.788,18.9,100.0,3.6,21.0
|
||||
30.349,-85.788,20.0,130.0,3.6,21.0
|
||||
30.349,-85.788,21.1,140.0,4.1,21.0
|
||||
30.349,-85.788,21.7,150.0,4.1,21.0
|
||||
30.349,-85.788,21.7,170.0,3.1,21.0
|
||||
30.349,-85.788,22.2,170.0,3.1,21.0
|
||||
30.349,-85.788,20.6,0.0,0.0,21.0
|
||||
30.349,-85.788,17.2,0.0,0.0,21.0
|
||||
30.349,-85.788,14.4,0.0,0.0,21.0
|
||||
30.349,-85.788,12.8,100.0,1.5,21.0
|
||||
30.349,-85.788,13.3,100.0,1.5,21.0
|
||||
30.349,-85.788,10.6,0.0,0.0,21.0
|
||||
30.349,-85.788,9.4,0.0,0.0,21.0
|
||||
30.349,-85.788,7.8,0.0,0.0,21.0
|
||||
30.358,-85.799,8.3,0.0,0.0,21.0
|
||||
30.349,-85.788,0.0,0.0,0.0,21.0
|
||||
30.358,-85.799,6.7,0.0,0.0,21.0
|
||||
30.358,-85.799,7.2,0.0,0.0,21.0
|
||||
30.358,-85.799,7.2,0.0,0.0,21.0
|
||||
30.358,-85.799,8.3,50.0,1.5,21.0
|
||||
30.358,-85.799,9.4,0.0,0.0,21.0
|
||||
30.358,-85.799,8.9,0.0,0.0,21.0
|
||||
30.358,-85.799,10.0,340.0,1.5,21.0
|
||||
30.358,-85.799,12.8,40.0,1.5,21.0
|
||||
30.358,-85.799,16.7,100.0,2.1,21.0
|
||||
30.358,-85.799,21.1,100.0,1.5,21.0
|
||||
30.358,-85.799,23.3,0.0,0.0,21.0
|
||||
30.358,-85.799,25.0,180.0,4.6,21.0
|
||||
30.358,-85.799,24.4,230.0,3.6,21.0
|
||||
30.358,-85.799,25.0,210.0,4.1,21.0
|
||||
30.358,-85.799,23.9,170.0,4.1,21.0
|
||||
30.358,-85.799,22.8,0.0,0.0,21.0
|
||||
30.358,-85.799,19.4,0.0,0.0,21.0
|
||||
30.358,-85.799,17.8,140.0,2.1,21.0
|
||||
60.383,5.333,-0.7,0.0,0.0,36.0
|
||||
60.383,5.333,0.6,270.0,2.0,36.0
|
||||
60.383,5.333,-0.9,120.0,1.0,36.0
|
||||
60.383,5.333,-1.6,130.0,2.0,36.0
|
||||
60.383,5.333,-1.4,150.0,1.0,36.0
|
||||
60.383,5.333,-1.7,0.0,0.0,36.0
|
||||
60.383,5.333,-1.7,140.0,1.0,36.0
|
||||
60.383,5.333,-1.4,0.0,0.0,36.0
|
||||
60.383,5.333,-1.0,0.0,0.0,36.0
|
||||
60.383,5.333,-1.0,150.0,1.0,36.0
|
||||
60.383,5.333,-0.7,140.0,1.0,36.0
|
||||
60.383,5.333,0.5,150.0,1.0,36.0
|
||||
60.383,5.333,1.9,0.0,0.0,36.0
|
||||
60.383,5.333,1.7,0.0,0.0,36.0
|
||||
60.383,5.333,2.1,310.0,2.0,36.0
|
||||
60.383,5.333,1.5,90.0,1.0,36.0
|
||||
60.383,5.333,1.9,290.0,1.0,36.0
|
||||
60.383,5.333,2.0,320.0,1.0,36.0
|
||||
60.383,5.333,1.9,330.0,1.0,36.0
|
||||
60.383,5.333,1.3,350.0,1.0,36.0
|
||||
60.383,5.333,1.5,120.0,1.0,36.0
|
||||
60.383,5.333,1.3,150.0,2.0,36.0
|
||||
60.383,5.333,0.8,140.0,1.0,36.0
|
||||
60.383,5.333,0.3,300.0,1.0,36.0
|
||||
60.383,5.333,0.2,140.0,1.0,36.0
|
||||
60.383,5.333,0.4,140.0,1.0,36.0
|
||||
60.383,5.333,0.5,320.0,1.0,36.0
|
||||
60.383,5.333,1.5,330.0,1.0,36.0
|
||||
60.383,5.333,1.8,40.0,1.0,36.0
|
||||
60.383,5.333,2.3,170.0,1.0,36.0
|
||||
60.383,5.333,2.7,140.0,1.0,36.0
|
||||
60.383,5.333,3.1,330.0,1.0,36.0
|
||||
60.383,5.333,3.8,350.0,1.0,36.0
|
||||
60.383,5.333,3.8,140.0,1.0,36.0
|
||||
60.383,5.333,4.1,150.0,1.0,36.0
|
||||
60.383,5.333,4.4,180.0,1.0,36.0
|
||||
60.383,5.333,4.9,300.0,1.0,36.0
|
||||
60.383,5.333,5.2,320.0,1.0,36.0
|
||||
60.383,5.333,6.7,340.0,1.0,36.0
|
||||
60.383,5.333,6.9,250.0,1.0,36.0
|
||||
60.383,5.333,7.9,300.0,2.0,36.0
|
||||
60.383,5.333,5.5,140.0,1.0,36.0
|
||||
60.383,5.333,7.1,140.0,2.0,36.0
|
||||
60.383,5.333,7.0,280.0,2.0,36.0
|
||||
60.383,5.333,4.6,170.0,1.0,36.0
|
||||
60.383,5.333,4.8,330.0,1.0,36.0
|
||||
60.383,5.333,6.4,260.0,2.0,36.0
|
||||
60.383,5.333,6.2,340.0,1.0,36.0
|
||||
60.383,5.333,5.7,320.0,2.0,36.0
|
||||
60.383,5.333,5.2,100.0,1.0,36.0
|
||||
60.383,5.333,5.1,310.0,1.0,36.0
|
||||
60.383,5.333,4.9,290.0,2.0,36.0
|
||||
60.383,5.333,4.9,310.0,2.0,36.0
|
||||
60.383,5.333,6.1,320.0,2.0,36.0
|
||||
60.383,5.333,7.0,250.0,1.0,36.0
|
||||
60.383,5.333,5.3,140.0,1.0,36.0
|
||||
60.383,5.333,6.9,350.0,1.0,36.0
|
||||
60.383,5.333,9.7,110.0,3.0,36.0
|
||||
60.383,5.333,10.3,300.0,3.0,36.0
|
||||
60.383,5.333,8.7,310.0,1.0,36.0
|
||||
60.383,5.333,9.0,270.0,3.0,36.0
|
||||
60.383,5.333,11.6,80.0,3.0,36.0
|
||||
60.383,5.333,11.4,80.0,4.0,36.0
|
||||
60.383,5.333,9.7,70.0,5.0,36.0
|
||||
60.383,5.333,9.5,80.0,6.0,36.0
|
||||
60.383,5.333,8.7,80.0,5.0,36.0
|
||||
60.383,5.333,7.7,80.0,5.0,36.0
|
||||
60.383,5.333,8.2,80.0,4.0,36.0
|
||||
60.383,5.333,7.7,30.0,1.0,36.0
|
||||
60.383,5.333,7.2,310.0,1.0,36.0
|
||||
60.383,5.333,6.8,300.0,2.0,36.0
|
||||
60.383,5.333,6.7,140.0,1.0,36.0
|
||||
|
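These rows feed the drift tutorial below. A minimal sketch of inspecting such a file locally with pandas — the column names are the ones the notebook selects, the local path is hypothetical, and the header row is assumed to be present in the full file (the notebook's Tabular dataset infers one):

```python
import pandas as pd

# Inspect the weather observations used by the drift tutorial.
# 'dataset/testing.csv' is a hypothetical local path.
df = pd.read_csv('dataset/testing.csv')
cols = ['latitude', 'longitude', 'temperature', 'windAngle', 'windSpeed', 'elevation']
print(df[cols].describe())
```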
File diff suppressed because it is too large
@@ -1,578 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Monitor data drift on models deployed to Azure Kubernetes Service\n",
"\n",
"In this tutorial, you will set up a data drift monitor on a toy model that predicts elevation from a few weather factors, and configure email alerts that are sent if drift is detected."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prerequisites\n",
"If you are using an Azure Machine Learning Compute instance, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) first if you haven't already established your connection to the AzureML Workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check core SDK version number\n",
"import azureml.core\n",
"\n",
"print('SDK version:', azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize Workspace\n",
"\n",
"Initialize a workspace object from persisted configuration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"ws"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set up the training dataset and model\n",
"\n",
"Set up the training dataset and model in preparation for deployment to Azure Kubernetes Service.\n",
"\n",
"The next few cells will:\n",
" * get the default datastore and upload the `training.csv` dataset to the datastore\n",
" * create and register the dataset\n",
" * register the model together with the dataset\n",
" \n",
"See the `config.py` script in this folder for details on how `training.csv` and `elevation-regression-model.pkl` are created. If you train your model in Azure ML using a Dataset, it will be automatically captured when registering the model from the run. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# use the default datastore\n",
"dstore = ws.get_default_datastore()\n",
"\n",
"# upload the weather data\n",
"dstore.upload('dataset', 'drift-on-aks-data', overwrite=True, show_progress=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"# create the dataset\n",
"dset = Dataset.Tabular.from_delimited_files(dstore.path('drift-on-aks-data/training.csv'))\n",
"# register the dataset\n",
"dset = dset.register(ws, 'drift-demo-dataset')\n",
"# get the dataset by name from the workspace\n",
"dset = Dataset.get_by_name(ws, 'drift-demo-dataset')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.model import Model\n",
"\n",
"# register the model\n",
"model = Model.register(model_path='elevation-regression-model.pkl',\n",
"                       model_name='elevation-regression-model.pkl',\n",
"                       tags={'area': \"weather\", 'type': \"linear regression\"},\n",
"                       description='Linear regression model to predict elevation based on the weather',\n",
"                       workspace=ws,\n",
"                       datasets=[(Dataset.Scenario.TRAINING, dset)]) # need to register the dataset with the model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create the inference config\n",
"\n",
"Create the environment and inference config from the `myenv.yml` and `score.py` files. Notice the [Model Data Collector](https://docs.microsoft.com/azure/machine-learning/service/how-to-enable-data-collection) code included in the scoring script. This dependency is currently required to collect model data, but will be removed in the near future as data collection in Azure Machine Learning webservice endpoints is automated."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"\n",
"# create the environment from the yml file\n",
"env = Environment.from_conda_specification(name='deploytocloudenv', file_path='myenv.yml')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.model import InferenceConfig\n",
"\n",
"# create an inference config, combining the environment and entry script\n",
"inference_config = InferenceConfig(entry_script='score.py', environment=env)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create the AKS compute target\n",
"\n",
"Create an Azure Kubernetes Service compute target to deploy the model to. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import AksCompute, ComputeTarget\n",
"\n",
"# Use the default configuration (you can also provide parameters to customize this).\n",
"# For example, to create a dev/test cluster, use:\n",
"# prov_config = AksCompute.provisioning_configuration(cluster_purpose = AksCompute.ClusterPurpose.DEV_TEST)\n",
"prov_config = AksCompute.provisioning_configuration()\n",
"\n",
"aks_name = 'drift-aks'\n",
"aks_target = ws.compute_targets.get(aks_name)\n",
"\n",
"# Create the cluster\n",
"if not aks_target:\n",
"    aks_target = ComputeTarget.create(workspace = ws,\n",
"                                      name = aks_name,\n",
"                                      provisioning_configuration = prov_config)\n",
"\n",
"    # Wait for the create process to complete\n",
"    aks_target.wait_for_completion(show_output = True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploy the model to AKS\n",
"\n",
"Deploy the model as a webservice endpoint. Be sure to enable the `collect_model_data` flag so that serving data is collected in blob storage for use by the data drift capability."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.webservice import AksWebservice\n",
"\n",
"deployment_config = AksWebservice.deploy_configuration(cpu_cores=1, memory_gb=1, collect_model_data=True)\n",
"service_name = 'drift-aks-service'\n",
"\n",
"service = Model.deploy(ws, service_name, [model], inference_config, deployment_config, aks_target)\n",
"\n",
"service.wait_for_deployment(True)\n",
"print(service.state)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Run recent weather data through the webservice\n",
"\n",
"The cells below take the weather data of Florida from 2019-11-12 to 2019-11-20, filter and transform it using the same processes as the training dataset, and run the data through the service."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# create the dataset\n",
"tset = Dataset.Tabular.from_delimited_files(dstore.path('drift-on-aks-data/testing.csv'))\n",
"\n",
"df = tset.to_pandas_dataframe().fillna(0)\n",
"\n",
"X_features = ['latitude', 'longitude', 'temperature', 'windAngle', 'windSpeed']\n",
"y_features = ['elevation']\n",
"\n",
"X = df[X_features]\n",
"y = df[y_features]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"data = json.dumps({'data': X.values.tolist()})\n",
"\n",
"data_encoded = bytes(data, encoding='utf8')\n",
"prediction = service.run(input_data=data_encoded)\n",
"print(prediction)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wait 15 minutes for scoring data to be uploaded\n",
"\n",
"From the Model Data Collector, it can take up to (but usually less than) 15 minutes for data to arrive in your blob storage account.\n",
"\n",
"Wait 15 minutes to ensure the cells below will run."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import time\n",
"\n",
"time.sleep(900)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Get the scoring dataset that has been created\n",
"\n",
"A scoring dataset is created automatically for each model/version/service that has been deployed, and is registered with a name in the format inference-data-{model_name}-{model_version}-{service_name}.\n",
"\n",
"Wait 15 minutes to ensure the cells below will run."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"scoring_dataset_name = \"inference-data-{0}-{1}-{2}\".format(model.name, model.version, service_name)\n",
"scoring_dataset = Dataset.get_by_name(ws, scoring_dataset_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create a dataset monitor for the scoring dataset against the training dataset\n",
"\n",
"Check out the [datadrift on datasets notebook](../../work-with-data/datadrift-tutorial/datadrift-tutorial.ipynb) for more details"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create an Azure Machine Learning Compute cluster\n",
"\n",
"The data drift capability needs a compute target for computing drift and other data metrics. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"\n",
"compute_name = 'cpu-cluster'\n",
"\n",
"if compute_name in ws.compute_targets:\n",
"    compute_target = ws.compute_targets[compute_name]\n",
"    if compute_target and type(compute_target) is AmlCompute:\n",
"        print('Found existing compute target, using it: ' + compute_name)\n",
"else:\n",
"    print('creating a new compute target...')\n",
"    provisioning_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D3_V2', min_nodes=0, max_nodes=2)\n",
"\n",
"    # create the cluster\n",
"    compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
"    # can poll for a minimum number of nodes and for a specific timeout.\n",
"    # if no min node count is provided it will use the scale settings for the cluster\n",
"    compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
"\n",
"    # For a more detailed view of current AmlCompute status, use get_status()\n",
"    print(compute_target.get_status().serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create and update the data drift object"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.datadrift import DataDriftDetector, AlertConfiguration\n",
"\n",
"alert_config = AlertConfiguration(['user@contoso.com']) # replace with your email to receive alerts from the scheduled pipeline after enabling\n",
"monitor_name = \"monitor_model_demo\"\n",
"baseline = dset # training dataset\n",
"target = scoring_dataset # scoring dataset\n",
"\n",
"try:\n",
"    monitor = DataDriftDetector.create_from_datasets(ws, monitor_name, baseline, target, \n",
"                                                     compute_target='cpu-cluster', # compute target for scheduled pipeline and backfills \n",
"                                                     frequency='Day', # how often to analyze target data\n",
"                                                     feature_list=None, # list of features to detect drift on\n",
"                                                     drift_threshold=None, # threshold from 0 to 1 for email alerting\n",
"                                                     latency=0, # SLA in hours for target data to arrive in the dataset\n",
"                                                     alert_config=alert_config) # email addresses to send alert\n",
"except KeyError:\n",
"    monitor = DataDriftDetector.get_by_name(ws, monitor_name)\n",
"    \n",
"monitor"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# many monitor settings can be updated \n",
"monitor = monitor.update(drift_threshold = 0.1, feature_list = X_features)\n",
"\n",
"monitor"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Analyze today's scoring data\n",
"\n",
"Perform a data drift run on the data sent to the service earlier in this notebook. If you set your email address in the alert configuration and the drift threshold to <=0.1, you should receive an email alert for drift from this run.\n",
"\n",
"Wait for the run to complete before getting the results. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from datetime import datetime\n",
"\n",
"now = datetime.utcnow()\n",
"target_date = datetime(now.year, now.month, now.day)\n",
"analysis_run = monitor.backfill(target_date, target_date)\n",
"analysis_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Query metrics and show results in Python\n",
"\n",
"The cells below will plot some key data drift metrics, and can be used to query the results. Run `help(monitor.get_output)` for specifics on the object returned."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"analysis_run.wait_for_completion(wait_post_processing=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get and view results and metrics\n",
"\n",
"For enterprise workspaces, the UI in the Azure Machine Learning studio can be used. Otherwise, the metrics can be queried in Python and plotted. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# get results from the Python SDK after the analysis run finishes\n",
"results, metrics = monitor.get_output(start_time=target_date, end_time=target_date)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# plot the results from the Python SDK \n",
"monitor.show(start_time=target_date, end_time=target_date)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Enable the monitor's pipeline schedule\n",
"\n",
"Turn on a scheduled pipeline which will analyze the serving dataset for drift. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# enable the pipeline schedule and receive email alerts\n",
"monitor.enable_schedule()\n",
"\n",
"# disable the pipeline schedule \n",
"#monitor.disable_schedule()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Delete the DataDriftDetector\n",
"\n",
"Invoking the `delete()` method on the object deletes the drift monitor permanently; this cannot be undone. You will no longer be able to find it in the UI, or through the `list()` or `get()` methods. The object on which `delete()` was called will have its state set to deleted and its name suffixed with 'deleted'. The baseline and target datasets, and any model data that was collected, are not deleted. The compute is not deleted. The DataDrift schedule pipeline is disabled and archived."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"monitor.delete()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
" * See [our documentation](https://aka.ms/datadrift/aks) or [Python SDK reference](https://docs.microsoft.com/python/api/overview/azure/ml/intro)\n",
" * [Send requests or feedback](mailto:driftfeedback@microsoft.com) on data drift directly to the team\n",
" * Please open issues with data drift here on GitHub or on StackOverflow if others are likely to run into the same issue"
]
}
],
"metadata": {
"authors": [
{
"name": "jamgan"
}
],
"category": "tutorial",
"compute": [
"Remote"
],
"datasets": [
"NOAA"
],
"deployment": [
"AKS"
],
"exclude_from_index": false,
"framework": [
"Azure ML"
],
"friendly_name": "Data drift on aks",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.10"
},
"star_tag": [
"featured"
],
"tags": [
"Dataset",
"Timeseries",
"Drift"
],
"task": "Filtering"
},
"nbformat": 4,
"nbformat_minor": 4
}
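The deleted notebook above calls the endpoint through `service.run`; the same AKS endpoint can also be exercised over plain HTTP. A minimal sketch, assuming the service deployed above still exists under the name used in the notebook (the payload shape matches its scoring script):

```python
import json

import requests
from azureml.core import Workspace
from azureml.core.webservice import Webservice

ws = Workspace.from_config()
service = Webservice(ws, 'drift-aks-service')  # the service name used above

# AKS endpoints are key-authenticated.
key, _ = service.get_keys()
headers = {'Content-Type': 'application/json',
           'Authorization': 'Bearer ' + key}

# One row of [latitude, longitude, temperature, windAngle, windSpeed].
payload = json.dumps({'data': [[30.349, -85.788, 21.1, 330.0, 2.6]]})

response = requests.post(service.scoring_uri, data=payload, headers=headers)
print(response.json())  # predicted elevation(s)
```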
@@ -1,8 +0,0 @@
name: drift-on-aks
dependencies:
- pip:
  - azureml-sdk
  - azureml-datadrift
  - azureml-monitoring
  - azureml-opendatasets
  - azureml-widgets
Binary file not shown.
@@ -1,11 +0,0 @@
name: project_environment
dependencies:
- python=3.6.2
- pip:
  - azureml-core
  - azureml-defaults
  - azureml-monitoring
  - scikit-learn
  - numpy
  - packaging
  - inference-schema[numpy-support]
@@ -1,44 +0,0 @@
import os

import numpy as np
from azureml.monitoring import ModelDataCollector
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
from inference_schema.schema_decorators import input_schema, output_schema
# sklearn.externals.joblib is removed in 0.23
from sklearn import __version__ as sklearnver
from packaging.version import Version
if Version(sklearnver) < Version("0.23.0"):
    from sklearn.externals import joblib
else:
    import joblib


def init():
    global model
    global inputs_dc
    inputs_dc = ModelDataCollector('elevation-regression-model.pkl', designation='inputs',
                                   feature_names=['latitude', 'longitude', 'temperature', 'windAngle', 'windSpeed'])
    # Note: 'elevation-regression-model.pkl' is the name under which the model was registered,
    # and it resolves differently at deployment time than when the code is run locally.
    # AZUREML_MODEL_DIR is an environment variable created during deployment.
    # It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION).
    # For multiple models, it points to the folder containing all deployed models (./azureml-models).
    model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'elevation-regression-model.pkl')
    model = joblib.load(model_path)


input_sample = np.array([[30, -85, 21, 150, 6]])
output_sample = np.array([8.995])


@input_schema('data', NumpyParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data):
    try:
        inputs_dc.collect(data)
        result = model.predict(data)
        # you can return any data type as long as it is JSON-serializable
        return result.tolist()
    except Exception as e:
        error = str(e)
        return error
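A quick way to sanity-check a scoring script like the one above before deploying is to drive `init()` and `run()` directly. A minimal sketch, assuming the script is saved as `score.py`, the model pickle sits in the current directory, and `azureml-monitoring` is installed (outside a deployed service the `ModelDataCollector` is expected to only log that collection is not enabled):

```python
import os

import numpy as np

# AZUREML_MODEL_DIR is normally set by the deployment; point it at a local
# folder containing elevation-regression-model.pkl for an offline test.
os.environ['AZUREML_MODEL_DIR'] = '.'

import score  # the scoring script shown above, assumed saved as score.py

score.init()
print(score.run(np.array([[30.0, -85.0, 21.0, 150.0, 6.0]])))
```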
@@ -100,7 +100,7 @@
"\n",
"# Check core SDK version number\n",
"\n",
"print(\"This notebook was created using SDK version 1.12.0, you are currently running version\", azureml.core.VERSION)"
"print(\"This notebook was created using SDK version 1.13.0, you are currently running version\", azureml.core.VERSION)"
]
},
{
@@ -1,9 +0,0 @@
name: train-and-deploy-keras-auto-logging
dependencies:
- pip:
  - azureml-sdk
  - numpy
  - azureml-mlflow
  - matplotlib
  - tensorflow==2.1
  - keras
@@ -1,9 +0,0 @@
name: train-and-deploy-pytorch
dependencies:
- pytorch::pytorch==1.4.0
- pytorch::torchvision
- pip:
  - azureml-sdk
  - numpy
  - azureml-mlflow
  - matplotlib
@@ -1,406 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Train using Azure Machine Learning Compute Instance\n",
"\n",
"* Initialize Workspace\n",
"* Introduction to ComputeInstance\n",
"* Create an Experiment\n",
"* Submit ComputeInstance run\n",
"* Additional operations to perform on ComputeInstance"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prerequisites\n",
"If you are using an Azure Machine Learning ComputeInstance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first, if you haven't already, to establish your connection to the AzureML Workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check core SDK version number\n",
"import azureml.core\n",
"\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize Workspace\n",
"\n",
"Initialize a workspace object"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"create workspace"
]
},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction to ComputeInstance\n",
"\n",
"\n",
"Azure Machine Learning compute instance is a fully-managed cloud-based workstation optimized for your machine learning development environment. It is created **within your workspace region**.\n",
"\n",
"For more information on ComputeInstance, please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/concept-compute-instance)\n",
"\n",
"**Note**: As with other Azure services, there are limits on certain resources (e.g. AmlCompute quota) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create ComputeInstance\n",
"First let's check which VM families are available in your region. Azure is a regional service and some specialized SKUs (especially GPUs) are only available in certain regions. Since ComputeInstance is created in the region of your workspace, we will use the supported_vmsizes() function to see if the VM family we want to use ('STANDARD_D3_V2') is supported.\n",
"\n",
"You can also pass a different region to check availability and then re-create your workspace in that region through the [configuration notebook](../../../configuration.ipynb)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"msdoc": "how-to-auto-train-remote.md",
"name": "check_region"
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, ComputeInstance\n",
"\n",
"ComputeInstance.supported_vmsizes(workspace = ws)\n",
"# ComputeInstance.supported_vmsizes(workspace = ws, location='eastus')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"msdoc": "how-to-auto-train-remote.md",
"name": "create_instance"
},
"outputs": [],
"source": [
"import datetime\n",
"import time\n",
"\n",
"from azureml.core.compute import ComputeTarget, ComputeInstance\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your instance\n",
"# Compute instance name should be unique across the azure region\n",
"compute_name = \"ci{}\".format(ws._workspace_id)[:10]\n",
"\n",
"# Verify that the instance does not exist already\n",
"try:\n",
"    instance = ComputeInstance(workspace=ws, name=compute_name)\n",
"    print('Found existing instance, using it.')\n",
"except ComputeTargetException:\n",
"    compute_config = ComputeInstance.provisioning_configuration(\n",
"        vm_size='STANDARD_D3_V2',\n",
"        ssh_public_access=False,\n",
"        # vnet_resourcegroup_name='<my-resource-group>',\n",
"        # vnet_name='<my-vnet-name>',\n",
"        # subnet_name='default',\n",
"        # admin_user_ssh_public_key='<my-sshkey>'\n",
"    )\n",
"    instance = ComputeInstance.create(ws, compute_name, compute_config)\n",
"    instance.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create An Experiment\n",
"\n",
"**Experiment** is a logical container in an Azure ML Workspace. It hosts run records which can include run metrics and output artifacts from your experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"experiment_name = 'train-on-computeinstance'\n",
"experiment = Experiment(workspace = ws, name = experiment_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Submit ComputeInstance run\n",
"The training script `train.py` has already been created for you"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create environment\n",
"\n",
"Create an environment with scikit-learn installed."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"myenv = Environment(\"myenv\")\n",
"myenv.python.conda_dependencies = CondaDependencies.create(conda_packages=['scikit-learn'])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Configure & Run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"from azureml.core.runconfig import DEFAULT_CPU_IMAGE\n",
"\n",
"src = ScriptRunConfig(source_directory='', script='train.py')\n",
"\n",
"# Set the compute target to the one created in the previous step\n",
"src.run_config.target = instance\n",
"\n",
"# Set the environment\n",
"src.run_config.environment = myenv\n",
" \n",
"run = experiment.submit(config=src)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can use get_active_runs() to get the currently running or queued jobs on the compute instance"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# wait for the run to reach the Queued or Running state if it is in the Preparing state\n",
"status = run.get_status()\n",
"while status not in ['Queued', 'Running', 'Completed', 'Failed', 'Canceled']:\n",
"    print('Run status: {}'.format(status))\n",
"    time.sleep(10)\n",
"    status = run.get_status()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# get active runs which are in the Queued or Running state\n",
"active_runs = instance.get_active_runs()\n",
"for active_run in active_runs:\n",
"    print(active_run.run_id, ',', active_run.status)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"run.wait_for_completion()\n",
"print(run.get_metrics())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Additional operations to perform on ComputeInstance\n",
"\n",
"You can perform more operations on ComputeInstance, such as getting the status, changing its state, or deleting the compute."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"msdoc": "how-to-auto-train-remote.md",
"name": "get_status"
},
"outputs": [],
"source": [
"# get_status() gets the latest status of the ComputeInstance target\n",
"instance.get_status()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"msdoc": "how-to-auto-train-remote.md",
"name": "stop"
},
"outputs": [],
"source": [
"# stop() is used to stop the ComputeInstance\n",
"# Stopping ComputeInstance will stop the billing meter and persist the state on the disk.\n",
"# Available quota will not be changed by this operation.\n",
"instance.stop(wait_for_completion=True, show_output=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"msdoc": "how-to-auto-train-remote.md",
"name": "start"
},
"outputs": [],
"source": [
"# start() is used to start the ComputeInstance if it is in the stopped state\n",
"instance.start(wait_for_completion=True, show_output=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# restart() is used to restart the ComputeInstance\n",
"instance.restart(wait_for_completion=True, show_output=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# delete() is used to delete the ComputeInstance target. Useful if you want to re-use the compute name \n",
"# instance.delete(wait_for_completion=True, show_output=True)"
]
}
],
"metadata": {
"authors": [
{
"name": "ramagott"
}
],
"category": "training",
"compute": [
"Compute Instance"
],
"datasets": [
"Diabetes"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"None"
],
"friendly_name": "Train on Azure Machine Learning Compute Instance",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.7"
},
"tags": [
"None"
],
"task": "Submit a run on Azure Machine Learning Compute Instance."
},
"nbformat": 4,
"nbformat_minor": 2
}
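The status-polling cell in the notebook above is easy to get wrong (the original looped on a stale variable, which never terminates); a minimal standalone sketch of the same idea, assuming an azureml `Run` object:

```python
import time


def wait_until_submitted(run, poll_seconds=10):
    """Poll until the run leaves the Preparing state (or finishes)."""
    settled_states = ['Queued', 'Running', 'Completed', 'Failed', 'Canceled']
    status = run.get_status()
    while status not in settled_states:
        print('Run status: {}'.format(status))
        time.sleep(poll_seconds)
        status = run.get_status()  # refresh the same variable the loop tests
    return status
```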
@@ -1,6 +0,0 @@
name: train-on-computeinstance
dependencies:
- scikit-learn
- pip:
  - azureml-sdk
  - azureml-widgets
@@ -1,48 +0,0 @@
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.

from sklearn.datasets import load_diabetes
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from azureml.core.run import Run
import os
import numpy as np
# sklearn.externals.joblib is removed in 0.23
try:
    from sklearn.externals import joblib
except ImportError:
    import joblib

os.makedirs('./outputs', exist_ok=True)

X, y = load_diabetes(return_X_y=True)

run = Run.get_context()

X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.2,
                                                    random_state=0)
data = {"train": {"X": X_train, "y": y_train},
        "test": {"X": X_test, "y": y_test}}

# list of numbers from 0.0 to 1.0 with a 0.05 interval
alphas = np.arange(0.0, 1.0, 0.05)

for alpha in alphas:
    # Use the Ridge algorithm to create a regression model
    reg = Ridge(alpha=alpha)
    reg.fit(data["train"]["X"], data["train"]["y"])

    preds = reg.predict(data["test"]["X"])
    mse = mean_squared_error(preds, data["test"]["y"])
    run.log('alpha', alpha)
    run.log('mse', mse)

    model_file_name = 'ridge_{0:.2f}.pkl'.format(alpha)
    # save the model in the outputs folder so it automatically gets uploaded
    joblib.dump(value=reg, filename=os.path.join('./outputs/', model_file_name))

    print('alpha is {0:.2f}, and mse is {1:0.2f}'.format(alpha, mse))
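After the sweep above, each pickled model lands in `./outputs`; a minimal sketch of loading one back and predicting (the alpha value here is hypothetical — pick whichever file the sweep produced):

```python
import os

import joblib
from sklearn.datasets import load_diabetes

# Load one of the models saved by the sweep above (hypothetical alpha=0.40).
model_path = os.path.join('./outputs', 'ridge_0.40.pkl')
reg = joblib.load(model_path)

X, y = load_diabetes(return_X_y=True)
print(reg.predict(X[:5]))  # predictions for the first five rows
```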
@@ -1,4 +1,5 @@
import os
import sys


def convert(imgf, labelf, outf, n):
@@ -23,8 +24,8 @@ def convert(imgf, labelf, outf, n):
    l.close()


mounted_input_path = os.environ['fashion_ds']
mounted_output_path = os.environ['AZUREML_DATAREFERENCE_prepared_fashion_ds']
mounted_input_path = sys.argv[1]
mounted_output_path = sys.argv[2]
os.makedirs(mounted_output_path, exist_ok=True)

convert(os.path.join(mounted_input_path, 'train-images-idx3-ubyte'),
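The switch from environment variables to `sys.argv` above pairs with the `arguments=[...]` change in the pipeline step shown below: whatever the step lists in `arguments` arrives in the script as argv. A minimal sketch of exercising the same contract locally (paths are hypothetical):

```python
import subprocess

# prepare.py now reads its input and output locations from argv[1] and argv[2],
# exactly as the PythonScriptStep's `arguments` list supplies them at run time.
subprocess.run(['python', 'prepare.py', './fashion-mnist-raw', './prepared'],
               check=True)
```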
@@ -65,12 +65,9 @@
"source": [
"import os\n",
"import azureml.core\n",
"from azureml.core import Workspace, Dataset, Datastore, ComputeTarget, RunConfiguration, Experiment\n",
"from azureml.core.runconfig import CondaDependencies\n",
"from azureml.core import Workspace, Dataset, Datastore, ComputeTarget, Experiment\n",
"from azureml.pipeline.steps import PythonScriptStep, EstimatorStep\n",
"from azureml.pipeline.core import Pipeline, PipelineData\n",
"from azureml.train.dnn import TensorFlow\n",
"\n",
"from azureml.pipeline.core import Pipeline\n",
"# check core SDK version number\n",
"print(\"Azure ML SDK Version: \", azureml.core.VERSION)"
]
@@ -141,7 +138,7 @@
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# choose a name for your cluster\n",
"cluster_name = \"gpu-cluster\"\n",
"cluster_name = \"amlcomp\"\n",
"\n",
"try:\n",
"    compute_target = ComputeTarget(workspace=workspace, name=cluster_name)\n",
@@ -168,9 +165,9 @@
"source": [
"## Create the Fashion MNIST dataset\n",
"\n",
"By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred. \n",
"By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred.\n",
"\n",
"Every workspace comes with a default [datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data) (and you can register more) which is backed by the Azure blob storage account associated with the workspace. We can use it to transfer data from local to the cloud, and create a dataset from it. We will now upload the [Fashion MNIST](./keras-mnist-fashion) to the default datastore (blob) within your workspace."
"Every workspace comes with a default [datastore](https://docs.microsoft.com/azure/machine-learning/service/how-to-access-data) (and you can register more) which is backed by the Azure blob storage account associated with the workspace. We can use it to transfer data from local to the cloud, and create a dataset from it. We will now upload the [Fashion MNIST](./data) to the default datastore (blob) within your workspace."
]
},
{
@@ -180,8 +177,8 @@
"outputs": [],
"source": [
"datastore = workspace.get_default_datastore()\n",
"datastore.upload_files(files = ['keras-mnist-fashion/t10k-images-idx3-ubyte', 'keras-mnist-fashion/t10k-labels-idx1-ubyte',\n",
"                                'keras-mnist-fashion/train-images-idx3-ubyte','keras-mnist-fashion/train-labels-idx1-ubyte'],\n",
"datastore.upload_files(files = ['data/t10k-images-idx3-ubyte', 'data/t10k-labels-idx1-ubyte',\n",
"                                'data/train-images-idx3-ubyte','data/train-labels-idx1-ubyte'],\n",
"                       target_path = 'mnist-fashion',\n",
"                       overwrite = True,\n",
"                       show_progress = True)"
@@ -191,7 +188,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Then we will create an unregistered FileDataset pointing to the path in the datastore. You can also create a dataset from multiple paths. [Learn More](https://aka.ms/azureml/howto/createdatasets) "
"Then we will create an unregistered FileDataset pointing to the path in the datastore. You can also create a dataset from multiple paths. [Learn More](https://aka.ms/azureml/howto/createdatasets) "
]
},
{
@@ -212,7 +209,7 @@
|
||||
"source": [
|
||||
"## Build 2-step ML pipeline\n",
|
||||
"\n",
|
||||
"The [Azure Machine Learning Pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines) enables data scientists to create and manage multiple simple and complex workflows concurrently. A typical pipeline would have multiple tasks to prepare data, train, deploy and evaluate models. Individual steps in the pipeline can make use of diverse compute options (for example: CPU for data preparation and GPU for training) and languages. [Learn More](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/machine-learning-pipelines)\n",
|
||||
"The [Azure Machine Learning Pipeline](https://docs.microsoft.com/azure/machine-learning/service/concept-ml-pipelines) enables data scientists to create and manage multiple simple and complex workflows concurrently. A typical pipeline would have multiple tasks to prepare data, train, deploy and evaluate models. Individual steps in the pipeline can make use of diverse compute options (for example: CPU for data preparation and GPU for training) and languages. [Learn More](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/machine-learning-pipelines)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Step 1: data preparation\n",
|
||||
@@ -222,28 +219,11 @@
|
||||
"Each image is 28 pixels in height and 28 pixels in width, for a total of 784 pixels in total. Each pixel has a single pixel-value associated with it, indicating the lightness or darkness of that pixel, with higher numbers meaning darker. This pixel-value is an integer between 0 and 255. Both mnist_train.csv and mnist_test.csv contain 785 columns. The first column consists of the class labels, which represent the article of clothing. The rest of the columns contain the pixel-values of the associated image."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# set up the compute environment to install required packages\n",
|
||||
"conda = CondaDependencies.create(\n",
|
||||
" pip_packages=['azureml-sdk','azureml-dataset-runtime[fuse,pandas]'],\n",
|
||||
" pin_sdk_version=False)\n",
|
||||
"\n",
|
||||
"conda.set_pip_option('--pre')\n",
|
||||
"\n",
|
||||
"run_config = RunConfiguration()\n",
|
||||
"run_config.environment.python.conda_dependencies = conda"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Intermediate data (or output of a step) is represented by a `PipelineData` object. preprared_fashion_ds is produced as the output of step 1, and used as the input of step 2. PipelineData introduces a data dependency between steps, and creates an implicit execution order in the pipeline. You can register a `PipelineData` as a dataset and version the output data automatically. [Learn More](https://docs.microsoft.com/azure/machine-learning/service/how-to-version-track-datasets#version-a-pipeline-output-dataset) "
|
||||
"Intermediate data (or output of a step) is represented by a `OutputFileDatasetConfig` object. preprared_fashion_ds is produced as the output of step 1, and used as the input of step 2. `OutputFileDatasetConfig` introduces a data dependency between steps, and creates an implicit execution order in the pipeline. You can register a `OutputFileDatasetConfig` as a dataset and version the output data automatically."
|
||||
]
|
||||
},
{
@@ -252,18 +232,28 @@
"metadata": {},
"outputs": [],
"source": [
"# define output data\n",
"prepared_fashion_ds = PipelineData('prepared_fashion_ds', datastore=datastore).as_dataset()\n",
"from azureml.data import OutputFileDatasetConfig\n",
"\n",
"# register output data as dataset\n",
"prepared_fashion_ds = prepared_fashion_ds.register(name='prepared_fashion_ds', create_new_version=True)"
"# learn more about the output config\n",
"help(OutputFileDatasetConfig)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# write output to datastore under folder `outputdataset` and register it as a dataset after the experiment completes\n",
"# make sure the service principal in your datastore has blob data contributor role in order to write data back\n",
"prepared_fashion_ds = OutputFileDatasetConfig(destination=(datastore, 'outputdataset/{run-id}')).register_on_complete(name='prepared_fashion_ds')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"A **PythonScriptStep** is a basic, built-in step to run a Python script on a compute target. It takes a script name and optionally other parameters like arguments for the script, compute target, inputs and outputs. If no compute target is specified, the default compute target for the workspace is used. You can also use a [**RunConfiguration**](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.runconfiguration?view=azure-ml-py) to specify requirements for the PythonScriptStep, such as conda dependencies and docker image."
"A **PythonScriptStep** is a basic, built-in step to run a Python script on a compute target. It takes a script name and optionally other parameters like arguments for the script, compute target, inputs and outputs. If no compute target is specified, the default compute target for the workspace is used. You can also use a [**RunConfiguration**](https://docs.microsoft.com/python/api/azureml-core/azureml.core.runconfiguration?view=azure-ml-py) to specify requirements for the PythonScriptStep, such as conda dependencies and docker image."
]
},
{
@@ -275,12 +265,10 @@
"prep_step = PythonScriptStep(name='prepare step',\n",
"                             script_name=\"prepare.py\",\n",
"                             # mount fashion_ds dataset to the compute_target\n",
"                             inputs=[fashion_ds.as_named_input('fashion_ds').as_mount()],\n",
"                             outputs=[prepared_fashion_ds],\n",
"                             arguments=[fashion_ds.as_named_input('fashion_ds').as_mount(), prepared_fashion_ds],\n",
"                             source_directory=script_folder,\n",
"                             compute_target=compute_target,\n",
"                             runconfig=run_config,\n",
"                             allow_reuse=False)"
"                             allow_reuse=True)"
]
},
{
@@ -289,9 +277,7 @@
"source": [
"### Step 2: train CNN with Keras\n",
"\n",
"Next, we construct an `azureml.train.dnn.TensorFlow` estimator object. The TensorFlow estimator provides a simple way of launching a TensorFlow training job on a compute target. It will automatically provide a docker image that has TensorFlow installed.\n",
"\n",
"[EstimatorStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.estimator_step.estimatorstep?view=azure-ml-py) adds a step to run a TensorFlow Estimator in a Pipeline. It takes a dataset as the input."
"Next, we construct an `azureml.train.estimator.Estimator` object. [EstimatorStep](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps.estimator_step.estimatorstep?view=azure-ml-py) adds a step to run an Estimator in a Pipeline. It takes a dataset as the input."
]
},
{
@@ -300,17 +286,17 @@
"metadata": {},
"outputs": [],
"source": [
"# set up training step with Tensorflow estimator\n",
"est = TensorFlow(entry_script='train.py',\n",
"                 source_directory=script_folder,\n",
"                 pip_packages = ['azureml-sdk', 'keras<=2.3.1', 'tensorflow==2.1.0', 'numpy','scikit-learn', 'matplotlib'],\n",
"                 compute_target=compute_target)\n",
"from azureml.train.estimator import Estimator\n",
"# set up training step with Estimator\n",
"est = Estimator(entry_script='train.py',\n",
"                source_directory=script_folder,\n",
"                pip_packages=['keras','tensorflow','numpy','scikit-learn', 'matplotlib','pandas'],\n",
"                compute_target=compute_target)\n",
"\n",
"est_step = EstimatorStep(name='train step',\n",
"                         estimator=est,\n",
"                         estimator_entry_script_arguments=[],\n",
"                         # parse prepared_fashion_ds into TabularDataset and use it as the input\n",
"                         inputs=[prepared_fashion_ds.parse_delimited_files()],\n",
"                         # parse prepared_fashion_ds into a TabularDataset and use it as input\n",
"                         estimator_entry_script_arguments=[prepared_fashion_ds.read_delimited_files().as_input(name='prepared_fashion_ds')],\n",
"                         compute_target=compute_target)"
]
},
@@ -321,7 +307,7 @@
"### Build the pipeline\n",
"Once we have the steps (or steps collection), we can build the [pipeline](https://docs.microsoft.com/python/api/azureml-pipeline-core/azureml.pipeline.core.pipeline.pipeline?view=azure-ml-py).\n",
"\n",
"A pipeline is created with a list of steps and a workspace. Submit a pipeline using [submit](https://docs.microsoft.com/python/api/azureml-core/azureml.core.experiment(class)?view=azure-ml-py#submit-config--tags-none----kwargs-). When submit is called, a [PipelineRun](https://docs.microsoft.com/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelinerun?view=azure-ml-py) is created which in turn creates [StepRun](https://docs.microsoft.com/python/api/azureml-pipeline-core/azureml.pipeline.core.steprun?view=azure-ml-py) objects for each step in the workflow."
"A pipeline is created with a list of steps and a workspace. Submit a pipeline using `submit`. When submit is called, a [PipelineRun](https://docs.microsoft.com/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelinerun?view=azure-ml-py) is created, which in turn creates [StepRun](https://docs.microsoft.com/python/api/azureml-pipeline-core/azureml.pipeline.core.steprun?view=azure-ml-py) objects for each step in the workflow."
]
},
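{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following cell is a short sketch (added for illustration) of building and submitting the pipeline from the two steps above; the experiment name is hypothetical.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# a minimal sketch, assuming `workspace`, `prep_step` and `est_step` from the cells above\n",
"from azureml.core import Experiment\n",
"from azureml.pipeline.core import Pipeline\n",
"\n",
"pipeline = Pipeline(workspace=workspace, steps=[prep_step, est_step])\n",
"run = Experiment(workspace, 'fashion-mnist-pipeline').submit(pipeline)\n",
"run.wait_for_completion(show_output=True)"
]
},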
{
@@ -374,23 +360,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Azure Machine Learning datasets make it easy to trace how your data is used in ML. [Learn More](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-version-track-datasets#track-datasets-in-experiments)<br>\n",
"For each Machine Learning experiment, you can easily trace the datasets used as the input through the `Run` object."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# get input datasets\n",
"prep_step = run.find_step_run('prepare step')[0]\n",
"inputs = prep_step.get_details()['inputDatasets']\n",
"input_dataset = inputs[0]['dataset']\n",
"\n",
"# list the files referenced by input_dataset\n",
"input_dataset.to_path()"
"Azure Machine Learning datasets make it easy to trace how your data is used in ML. [Learn More](https://docs.microsoft.com/azure/machine-learning/service/how-to-version-track-datasets#track-datasets-in-experiments)<br>"
]
},
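{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an illustration (adapted from the cell this commit removes), the input datasets of a step can still be read back through the `Run` object:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# a short sketch of tracing lineage, adapted from the removed cell above\n",
"prep_step_run = run.find_step_run('prepare step')[0]\n",
"inputs = prep_step_run.get_details()['inputDatasets']\n",
"input_dataset = inputs[0]['dataset']\n",
"\n",
"# list the files referenced by input_dataset\n",
"input_dataset.to_path()"
]
},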
{
@@ -406,11 +376,10 @@
"metadata": {},
"outputs": [],
"source": [
"fashion_ds = input_dataset.register(workspace = workspace,\n",
"                                    name = 'fashion_ds',\n",
"                                    description = 'image and label files from fashion mnist',\n",
"                                    create_new_version = True)\n",
"fashion_ds"
"fashion_ds = fashion_ds.register(workspace = workspace,\n",
"                                 name = 'fashion_ds',\n",
"                                 description = 'image and label files from fashion mnist',\n",
"                                 create_new_version = True)"
]
},
{

@@ -0,0 +1,320 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to use ScriptRun with data input and output\n",
"\n",
"This notebook shows how to use [ScriptRun](https://docs.microsoft.com/python/api/azureml-core/azureml.core.script_run.scriptrun?view=azure-ml-py) with input and output. A run submitted with ScriptRunConfig represents a single trial in an experiment. Submitting the run returns a ScriptRun object, which can be used to monitor the asynchronous execution of the run, log metrics and store output of the run, and analyze results and access artifacts generated by the run.\n",
"\n",
"\n",
"## Prerequisites:\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](https://aka.ms/pl-config) to:\n",
"    * install the AML SDK\n",
"    * create a workspace and its configuration file (`config.json`)"
]
},
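{
"cell_type": "markdown",
"metadata": {},
"source": [
"For illustration, a minimal sketch of the submit-and-monitor flow described above (assuming an `Experiment` named `exp` and a `ScriptRunConfig` named `src`, both created later in this notebook):\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# a minimal sketch, assuming `exp` and `src` exist (see the cells below)\n",
"run = exp.submit(config=src)          # returns a ScriptRun\n",
"run.wait_for_completion(show_output=True)\n",
"print(run.get_metrics())              # metrics logged during the run\n",
"print(run.get_file_names())           # artifacts stored by the run"
]
},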
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize workspace\n",
"Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n",
"      'Azure region: ' + ws.location, \n",
"      'Subscription id: ' + ws.subscription_id, \n",
"      'Resource group: ' + ws.resource_group, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create or Attach existing AmlCompute\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If a cluster with the given name cannot be found, a new cluster is created here. We will create an `AmlCompute` cluster of `STANDARD_NC6` GPU VMs. This process is broken down into 3 steps:\n",
"1. create the configuration (this step is local and only takes a second)\n",
"2. create the cluster (this step will take about **20 seconds**)\n",
"3. provision the VMs to bring the cluster to the initial size (of 1 in this case). This step will take about **3-5 minutes** and provides only sparse output in the process. Please make sure to wait until the call returns before moving to the next cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# choose a name for your cluster\n",
"cluster_name = \"amlcomp\"\n",
"\n",
"try:\n",
"    cpu_cluster = ComputeTarget(workspace=ws, name=cluster_name)\n",
"    print('Found existing compute target')\n",
"except ComputeTargetException:\n",
"    print('Creating a new compute target...')\n",
"    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', max_nodes=4)\n",
"\n",
"    # create the cluster\n",
"    cpu_cluster = ComputeTarget.create(ws, cluster_name, compute_config)\n",
"\n",
"    # can poll for a minimum number of nodes and for a specific timeout. \n",
"    # if no min node count is provided it uses the scale settings for the cluster\n",
"    cpu_cluster.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
"\n",
"# use get_status() to get a detailed status for the current cluster. \n",
"print(cpu_cluster.get_status().serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now that you have created the compute target, let's see what the workspace's `compute_targets` property returns. You should now see one entry named 'amlcomp' of type `AmlCompute`."
]
},
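{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following cell (added here for illustration) prints those entries:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# list the compute targets in the workspace; the cluster created above should appear here\n",
"for name, ct in ws.compute_targets.items():\n",
"    print(name, ct.type)"
]
},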
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Use a simple script\n",
"We have already created a simple \"hello world\" script. This is the script that we will submit through the [ScriptRunConfig](https://docs.microsoft.com/python/api/azureml-core/azureml.core.script_run_config.scriptrunconfig?view=azure-ml-py). It reads the iris dataset as input and writes it out to the `outputdataset` folder in the default blob datastore. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"source_directory = 'script_run'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%writefile $source_directory/dummy_train.py\n",
"\n",
"# Copyright (c) Microsoft Corporation. All rights reserved.\n",
"# Licensed under the MIT License.\n",
"import sys\n",
"import os\n",
"\n",
"print(\"*********************************************************\")\n",
"print(\"Hello Azure ML!\")\n",
"\n",
"mounted_input_path = sys.argv[1]\n",
"mounted_output_path = sys.argv[2]\n",
"\n",
"print(\"Argument 1: %s\" % mounted_input_path)\n",
"print(\"Argument 2: %s\" % mounted_output_path)\n",
" \n",
"with open(mounted_input_path, 'r') as f:\n",
"    content = f.read()\n",
"    with open(os.path.join(mounted_output_path, 'output.csv'), 'w') as fw:\n",
"        fw.write(content)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Every workspace comes with a default datastore (and you can register more), which is backed by the Azure blob storage account associated with the workspace. We can use it to transfer data from local to the cloud and create datasets from it. We will now upload the Iris data to the default datastore (blob) within your workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def_blob_store = ws.get_default_datastore()\n",
"def_blob_store.upload_files(files = ['iris.csv'],\n",
"                            target_path = 'script-run/',\n",
"                            overwrite = True,\n",
"                            show_progress = True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we are ready to define the input and output of your script. They can be passed in via `arguments`, which is a list of command-line arguments to pass to the training script specified in `script`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"from azureml.data import OutputFileDatasetConfig\n",
"\n",
"input_data = Dataset.File.from_files(def_blob_store.path('script-run/iris.csv')).as_named_input('input').as_mount()\n",
"\n",
"# output is configured to write the result back to def_blob_store, under the \"sample/outputdataset\" folder\n",
"# to learn more about options to configure the output, run 'help(OutputFileDatasetConfig)'\n",
"output = OutputFileDatasetConfig(destination=(def_blob_store, 'sample/outputdataset'))"
]
},
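{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally (a sketch, with a hypothetical dataset name), the output can also be registered as a dataset once the run completes, as the pipeline notebook above does:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# a sketch; 'iris_output_ds' is a hypothetical name\n",
"output = output.register_on_complete(name='iris_output_ds')"
]
},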
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"myenv = Environment(\"myenv\")\n",
"\n",
"myenv.docker.enabled = True\n",
"myenv.python.conda_dependencies = CondaDependencies.create(pip_packages=['azureml-sdk>=1.12.0'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"\n",
"src = ScriptRunConfig(source_directory=source_directory, \n",
"                      script='dummy_train.py', \n",
"                      # to mount the dataset on the remote compute and pass the mounted path as an argument to the training script\n",
"                      arguments =[input_data, output])\n",
"\n",
"src.run_config.framework = 'python'\n",
"src.run_config.target = cpu_cluster.name\n",
"\n",
"# Set environment\n",
"src.run_config.environment = myenv"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Build and Submit the Experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"exp = Experiment(ws, 'ScriptRun_sample')\n",
"run = exp.submit(config=src)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View Run Details"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"run.wait_for_completion(show_output=True)"
]
}
],
"metadata": {
"authors": [
{
"name": "sihhu"
}
],
"category": "tutorial",
"compute": [
"AML Compute"
],
"datasets": [
"Custom"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"Azure ML"
],
"friendly_name": "How to use ScriptRun with data input and output",
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
},
"order_index": 7,
"star_tag": [
"None"
],
"tags": [
"Dataset",
"ScriptRun"
],
"task": "Demonstrates the use of Scriptrun with datasets"
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,4 @@
|
||||
name: how-to-use-scriptrun
|
||||
dependencies:
|
||||
- pip:
|
||||
- azureml-sdk
|
||||
@@ -0,0 +1,19 @@
|
||||
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License.
|
||||
import sys
|
||||
import os
|
||||
|
||||
print("*********************************************************")
|
||||
print("Hello Azure ML!")
|
||||
|
||||
mounted_input_path = sys.argv[1]
|
||||
mounted_output_path = sys.argv[2]
|
||||
|
||||
print("Argument 1: %s" % mounted_input_path)
|
||||
print("Argument 2: %s" % mounted_output_path)
|
||||
|
||||
with open(mounted_input_path, 'r') as f:
|
||||
content = f.read()
|
||||
with open(os.path.join(mounted_output_path, 'output.csv'), 'w') as fw:
|
||||
fw.write(content)
|
||||
@@ -19,7 +19,6 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
| [Forecasting BikeShare Demand](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb) | Forecasting | BikeShare | Remote | None | Azure ML AutoML | Forecasting |
| [Forecasting orange juice sales with deployment](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb) | Forecasting | Orange Juice Sales | Remote | Azure Container Instance | Azure ML AutoML | None |
| [Register a model and deploy locally](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local.ipynb) | Deployment | None | Local | Local | None | None |
| :star:[Data drift on aks](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/monitor-models/data-drift/drift-on-aks.ipynb) | Filtering | NOAA | Remote | AKS | Azure ML | Dataset, Timeseries, Drift |
| :star:[Data drift quickdemo](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datadrift-tutorial/datadrift-tutorial.ipynb) | Filtering | NOAA | Remote | None | Azure ML | Dataset, Timeseries, Drift |
| :star:[Introduction to labeled datasets](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/labeled-datasets/labeled-datasets.ipynb) | Train | | Remote | None | Azure ML | Dataset, label, Estimator |
| :star:[Datasets with ML Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb) | Train | Fashion MNIST | Remote | None | Azure ML | Dataset, Pipeline, Estimator, ScriptRun |
@@ -48,6 +47,7 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
| [How to run a notebook as a step in AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-notebook-runner-step.ipynb) | Demonstrates the use of NotebookRunnerStep | Custom | AML Compute | None | Azure ML | None |
| [Use MLflow with Azure Machine Learning to Train and Deploy Keras Image Classifier](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-and-deploy-keras-auto-logging/train-and-deploy-keras-auto-logging.ipynb) | Use MLflow with Azure Machine Learning to Train and Deploy Keras Image Classifier, leveraging MLflow auto logging | MNIST | Local, AML Compute | Azure Container Instance | Keras | mlflow, keras |
| [Use MLflow with Azure Machine Learning to Train and Deploy PyTorch Image Classifier](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-and-deploy-pytorch/train-and-deploy-pytorch.ipynb) | Use MLflow with Azure Machine Learning to train and deploy a PyTorch image classifier model | MNIST | Local, AML Compute | Azure Container Instance | PyTorch | mlflow, pytorch |
| [How to use ScriptRun with data input and output](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/scriptrun-with-data-input-output/how-to-use-scriptrun.ipynb) | Demonstrates the use of ScriptRun with datasets | Custom | AML Compute | None | Azure ML | Dataset, ScriptRun |

## Training

@@ -66,7 +66,6 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
| [Resuming a model](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb) | Resume a model in TensorFlow from a previously submitted run | MNIST | AML Compute | None | TensorFlow | None |
| [Training in Spark](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb) | Submitting a run on a Spark cluster | None | HDI cluster | None | PySpark | None |
| [Train on Azure Machine Learning Compute](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb) | Submit a run on Azure Machine Learning Compute. | Diabetes | AML Compute | None | None | None |
| [Train on Azure Machine Learning Compute Instance](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-on-computeinstance/train-on-computeinstance.ipynb) | Submit a run on Azure Machine Learning Compute Instance. | Diabetes | Compute Instance | None | None | None |
| [Train on local compute](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-on-local/train-on-local.ipynb) | Train a model locally | Diabetes | Local | None | None | None |
| [Train in a remote Linux virtual machine](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb) | Configure and execute a run | Diabetes | Data Science Virtual Machine | None | None | None |
| [Using Tensorboard](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training-with-deep-learning/export-run-history-to-tensorboard/export-run-history-to-tensorboard.ipynb) | Export the run history as Tensorboard logs | None | None | None | TensorFlow | None |
@@ -101,6 +100,7 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
| [upload-fairness-dashboard](https://github.com/Azure/MachineLearningNotebooks/blob/master//contrib/fairness/upload-fairness-dashboard.ipynb) | | | | | | |
| [azure-ml-with-nvidia-rapids](https://github.com/Azure/MachineLearningNotebooks/blob/master//contrib/RAPIDS/azure-ml-with-nvidia-rapids.ipynb) | | | | | | |
| [auto-ml-continuous-retraining](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb) | | | | | | |
| [auto-ml-regression-model-proxy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/experimental/regression/auto-ml-regression-model-proxy.ipynb) | | | | | | |
| [auto-ml-forecasting-beer-remote](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb) | | | | | | |
| [auto-ml-forecasting-energy-demand](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb) | | | | | | |
| [auto-ml-regression](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb) | | | | | | |

@@ -102,7 +102,7 @@
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.13.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -382,7 +382,8 @@
"pygments_lexer": "ipython3",
"version": "3.6.5"
},
"msauthor": "trbye"
"msauthor": "trbye",
"network_required": false
},
"nbformat": 4,
"nbformat_minor": 2

@@ -689,7 +689,8 @@
"pygments_lexer": "ipython3",
"version": "3.7.6"
},
"msauthor": "roastala"
"msauthor": "roastala",
"network_required": false
},
"nbformat": 4,
"nbformat_minor": 2

@@ -567,7 +567,8 @@
"pygments_lexer": "ipython3",
"version": "3.7.6"
},
"msauthor": "sgilley"
"msauthor": "sgilley",
"network_required": false
},
"nbformat": 4,
"nbformat_minor": 2

@@ -610,7 +610,8 @@
"pygments_lexer": "ipython3",
"version": "3.6"
},
"msauthor": "vkanne"
"msauthor": "vkanne",
"network_required": false
},
"nbformat": 4,
"nbformat_minor": 2

@@ -649,7 +649,8 @@
"pygments_lexer": "ipython3",
"version": "3.6.7"
},
"msauthor": "trbye"
"msauthor": "trbye",
"network_required": false
},
"nbformat": 4,
"nbformat_minor": 2