Compare commits
91 Commits
azureml-sd...swinner95-
SHA1: 90819a87a8, 73db8ae04d, 3637dce58a, 23771fc599, 5f04a467b7, 532f65c998, f36dda0c2d, c7b56929bc, 5f19d75a42, a1968aafa2, 6b82991017, 725013511e, 6a20160173, 137db8aec0, b7b10c394b, 46206716a4, 92bb98ac62, b398c24262, e0618302e3, b6cddafa3e, 4188bd2474, 69126edfcb, 4e14c35b9b, 1608c19aa6, 46b8611b74, fbb01bde70, cefe2f0811, 42e0a31f88, 8b0998ac9f, 046c6051fb, bdb7db15ef, b13139f103, 8adb206ae3, 484b6bbb7a, 55ef0bda6a, 1401cdef33, 5d02206cbd, c24b65d4ae, 57c5ef318f, ba033d72f8, aa657ac528, 7d8289679d, a7c3db0560, e548847881, 08c6b1f4ed, 78abb65f5e, 3c6c090732, 513e36d9b2, 9db91a7fb8, d9b26b655b, cb8dc41766, 9c9b4bb122, f5c896c70f, 3b572eddb2, 51523db294, 3b4998941c, 6cdbfb8722, c086bd69c7, 279c9b8dc4, 98589fe335, 77f21058a2, baa65d0886, 0fffa11b2a, 20ec225343, 845e9d653e, 639ef81636, 60158bf41a, 8dbbb01b8a, 6e6b2b0c48, 85f5721bf8, 6a7dd741e7, 314218fc89, b50d2725c7, 9a2f448792, dd620f19fd, 8116d31da4, ef29dc1fa5, 97b345cb33, 282250e670, acef60c5b3, bfb444eb15, 6277659bf2, 1645e12712, cc4a32e70b, 997a35aed5, dd6317a4a0, 82d8353d54, 59a01c17a0, e31e1d9af3, d38b9db255, 761ad88c93
29 Dockerfiles/1.0.30/Dockerfile (Normal file)
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.30"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.30" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for production environments
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
29 Dockerfiles/1.0.33/Dockerfile (Normal file)
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.33"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.33" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for production environments
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
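A minimal sketch of building and running one of these images with standard Docker commands (the `azureml-sdk:1.0.33` tag is arbitrary; use `Dockerfiles/1.0.30` for the older SDK pin):

```sh
# build the image from the repository root
docker build -t azureml-sdk:1.0.33 Dockerfiles/1.0.33

# run it, publishing the Jupyter port the Dockerfile exposes
docker run -it -p 8887:8887 azureml-sdk:1.0.33
```

Jupyter should then be reachable at http://localhost:8887, matching the `EXPOSE 8887` and `CMD` lines above.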
17 NBSETUP.md
@@ -1,6 +1,4 @@
# Setting up environment

---
# Set up your notebook environment for Azure Machine Learning

To run the notebooks in this repository use one of the following options.

@@ -12,9 +10,7 @@ Azure Notebooks is a hosted Jupyter-based notebook service in the Azure cloud. A
1. Follow the instructions in the [Configuration](configuration.ipynb) notebook to create and connect to a workspace
1. Open one of the sample notebooks

**Make sure the Azure Notebook kernel is set to `Python 3.6`** when you open a notebook



**Make sure the Azure Notebook kernel is set to `Python 3.6`** when you open a notebook by choosing Kernel > Change Kernel > Python 3.6 from the menus.

## **Option 2: Use your own notebook server**

@@ -31,9 +27,6 @@ git clone https://github.com/Azure/MachineLearningNotebooks.git
# install the base SDK and a Jupyter notebook server
pip install azureml-sdk[notebooks]

# install the data prep component
pip install azureml-dataprep

# install model explainability component
pip install azureml-sdk[explain]

@@ -58,8 +51,7 @@ Please make sure you start with the [Configuration](configuration.ipynb) noteboo

### Video walkthrough:

[](https://youtu.be/VIsXeTuW3FU)

[!VIDEO https://youtu.be/VIsXeTuW3FU]

## **Option 3: Use Docker**

@@ -90,9 +82,6 @@ Now you can point your browser to http://localhost:8887. We recommend that you s
If you need additional Azure ML SDK components, you can either modify the Docker files before you build the Docker images to add additional steps, or install them through the command line in the live container after you build the Docker image. For example:

```sh
# install dataprep components
pip install azureml-dataprep

# install the core SDK and automated ml components
pip install azureml-sdk[automl]
```
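One way to run the pip commands above in an already-running container is `docker exec`; a sketch, where the container name is hypothetical (find yours with `docker ps`):

```sh
docker exec -it my-azureml-container \
  /bin/bash -c "source activate azureml && pip install azureml-sdk[automl]"
```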
11 README.md
@@ -11,7 +11,8 @@ pip install azureml-sdk
Read more detailed instructions on [how to set up your environment](./NBSETUP.md) using Azure Notebook service, your own Jupyter notebook server, or Docker.

## How to navigate and use the example notebooks?
You should always run the [Configuration](./configuration.ipynb) notebook first when setting up a notebook library on a new machine or in a new environment. It configures your notebook library to connect to an Azure Machine Learning workspace, and sets up your workspace and compute to be used by many of the other examples.
If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](./configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace.
It configures your notebook library to connect to an Azure Machine Learning workspace, and sets up your workspace and compute to be used by many of the other examples.

If you want to...

@@ -53,4 +54,10 @@ The [How to use Azure ML](./how-to-use-azureml) folder contains specific example
Visit the following repos to see projects contributed by Azure ML users:

- [Fine tune natural language processing models using Azure Machine Learning service](https://github.com/Microsoft/AzureML-BERT)
- [Fashion MNIST with Azure ML SDK](https://github.com/amynic/azureml-sdk-fashion)

![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/README.png)

@@ -287,6 +287,8 @@ Notice how the parameters are modified when using the CPU-only mode.

The outputs of the script can be observed in the master notebook as the script is executed

![](./images/run_output.png)

@@ -2,7 +2,7 @@

Learn how to use Azure Machine Learning services for experimentation and model management.

As a pre-requisite, run the [configuration Notebook](../configuration.ipynb) first to set up your Azure ML Workspace. Then, run the notebooks in the following recommended order.
If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration Notebook](../configuration.ipynb) first if you haven't already to establish your connection to the AzureML Workspace. Then, run the notebooks in the following recommended order.

* [train-within-notebook](./training/train-within-notebook): Train a model while tracking run history, and learn how to deploy the model as a web service to Azure Container Instance.
* [train-on-local](./training/train-on-local): Learn how to submit a run to a local computer and use Azure ML managed run configuration.
@@ -15,3 +15,6 @@ As a pre-requisite, run the [configuration Notebook](../configuration.ipynb) not
* [enable-app-insights-in-production-service](./deployment/enable-app-insights-in-production-service): Learn how to use App Insights with production web service.

Find quickstarts, end-to-end tutorials, and how-tos on the [official documentation site for Azure Machine Learning service](https://docs.microsoft.com/en-us/azure/machine-learning/service/).

![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/README.png)
@@ -1,8 +1,8 @@
# Table of Contents
1. [Automated ML Introduction](#introduction)
1. [Running samples in Azure Notebooks](#jupyter)
1. [Running samples in Azure Databricks](#databricks)
1. [Running samples in a Local Conda environment](#localconda)
1. [Setup using Azure Notebooks](#jupyter)
1. [Setup using Azure Databricks](#databricks)
1. [Setup using a Local Conda environment](#localconda)
1. [Automated ML SDK Sample Notebooks](#samples)
1. [Documentation](#documentation)
1. [Running using python command](#pythoncommand)

@@ -13,15 +13,15 @@
Automated machine learning (automated ML) builds high quality machine learning models for you by automating model and hyperparameter selection. Bring a labelled dataset that you want to build a model for, and automated ML will give you a high quality machine learning model that you can use for predictions.

If you are new to Data Science, AutoML will help you get jumpstarted by simplifying machine learning model building. It abstracts you from needing to perform model selection and hyperparameter selection, and in one step creates a high quality trained model for you to use.
If you are new to Data Science, automated ML will help you get jumpstarted by simplifying machine learning model building. It abstracts you from needing to perform model selection and hyperparameter selection, and in one step creates a high quality trained model for you to use.

If you are an experienced data scientist, AutoML will help increase your productivity by intelligently performing the model and hyperparameter selection for your training, and generates high quality models much quicker than manually specifying several combinations of the parameters and running training jobs. AutoML provides visibility and access to all the training jobs and the performance characteristics of the models to help you further tune the pipeline if you desire.
If you are an experienced data scientist, automated ML will help increase your productivity by intelligently performing the model and hyperparameter selection for your training, and generates high quality models much quicker than manually specifying several combinations of the parameters and running training jobs. Automated ML provides visibility and access to all the training jobs and the performance characteristics of the models to help you further tune the pipeline if you desire.

Below are the three execution environments supported by AutoML.
Below are the three execution environments supported by automated ML.

<a name="jupyter"></a>
## Running samples in Azure Notebooks - Jupyter based notebooks in the Azure cloud
## Setup using Azure Notebooks - Jupyter based notebooks in the Azure cloud

1. [](https://aka.ms/aml-clone-azure-notebooks)
[Import sample notebooks ](https://aka.ms/aml-clone-azure-notebooks) into Azure Notebooks.
@@ -29,7 +29,7 @@ Below are the three execution environments supported by AutoML.
1. Open one of the sample notebooks.

<a name="databricks"></a>
## Running samples in Azure Databricks
## Setup using Azure Databricks

**NOTE**: Please create your Azure Databricks cluster as v4.x (high concurrency preferred) with **Python 3** (dropdown).
**NOTE**: You should at least have contributor access to your Azure subscription to run the notebook.
@@ -39,7 +39,7 @@ Below are the three execution environments supported by AutoML.
- Attach the notebook to the cluster.

<a name="localconda"></a>
## Running samples in a Local Conda environment
## Setup using a Local Conda environment

To run these notebooks on your own notebook server, use these installation instructions.
The instructions below will install everything you need and then start a Jupyter notebook.
@@ -49,11 +49,15 @@ The instructions below will install everything you need and then start a Jupyter
There's no need to install mini-conda specifically.

### 2. Downloading the sample notebooks
- Download the sample notebooks from [GitHub](https://github.com/Azure/MachineLearningNotebooks) as zip and extract the contents to a local directory. The AutoML sample notebooks are in the "automl" folder.
- Download the sample notebooks from [GitHub](https://github.com/Azure/MachineLearningNotebooks) as zip and extract the contents to a local directory. The automated ML sample notebooks are in the "automated-machine-learning" folder.

### 3. Setup a new conda environment
The **automl/automl_setup** script creates a new conda environment, installs the necessary packages, configures the widget and starts a jupyter notebook.
It takes the conda environment name as an optional parameter. The default conda environment name is azure_automl. The exact command depends on the operating system. See the specific sections below for Windows, Mac and Linux. It can take about 10 minutes to execute.
The **automl_setup** script creates a new conda environment, installs the necessary packages, configures the widget and starts a jupyter notebook. It takes the conda environment name as an optional parameter. The default conda environment name is azure_automl. The exact command depends on the operating system. See the specific sections below for Windows, Mac and Linux. It can take about 10 minutes to execute.

Packages installed by the **automl_setup** script:
<ul><li>python</li><li>nb_conda</li><li>matplotlib</li><li>numpy</li><li>cython</li><li>urllib3</li><li>scipy</li><li>scikit-learn</li><li>pandas</li><li>tensorflow</li><li>py-xgboost</li><li>azureml-sdk</li><li>azureml-widgets</li><li>pandas-ml</li></ul>

For more details refer to the [automl_env.yml](./automl_env.yml)
## Windows
Start an **Anaconda Prompt** window, cd to the **how-to-use-azureml/automated-machine-learning** folder where the sample notebooks were extracted and then run:
```
@@ -81,7 +85,7 @@ bash automl_setup_linux.sh

### 5. Running Samples
- Please make sure you use the Python [conda env:azure_automl] kernel when trying the sample Notebooks.
- Follow the instructions in the individual notebooks to explore various features in AutoML
- Follow the instructions in the individual notebooks to explore various features in automated ML.

### 6. Starting jupyter notebook manually
To start your Jupyter notebook manually, use:

@@ -103,22 +107,22 @@ jupyter notebook

- [auto-ml-classification.ipynb](classification/auto-ml-classification.ipynb)
  - Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
  - Simple example of using Auto ML for classification
  - Simple example of using automated ML for classification
  - Uses local compute for training

- [auto-ml-regression.ipynb](regression/auto-ml-regression.ipynb)
  - Dataset: scikit learn's [diabetes dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_diabetes.html)
  - Simple example of using Auto ML for regression
  - Simple example of using automated ML for regression
  - Uses local compute for training

- [auto-ml-remote-execution.ipynb](remote-execution/auto-ml-remote-execution.ipynb)
  - Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
  - Example of using Auto ML for classification using a remote linux DSVM for training
  - Example of using automated ML for classification using a remote linux DSVM for training
  - Parallel execution of iterations
  - Async tracking of progress
  - Cancelling individual iterations or entire run
  - Retrieving models for any iteration or logged metric
  - Specify automl settings as kwargs
  - Specify automated ML settings as kwargs

- [auto-ml-remote-amlcompute.ipynb](remote-batchai/auto-ml-remote-amlcompute.ipynb)
  - Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
@@ -127,7 +131,7 @@ jupyter notebook
  - Async tracking of progress
  - Cancelling individual iterations or entire run
  - Retrieving models for any iteration or logged metric
  - Specify automl settings as kwargs
  - Specify automated ML settings as kwargs

- [auto-ml-remote-attach.ipynb](remote-attach/auto-ml-remote-attach.ipynb)
  - Dataset: Scikit learn's [20newsgroup](http://scikit-learn.org/stable/datasets/twenty_newsgroups.html)
@@ -148,8 +152,8 @@ jupyter notebook

- [auto-ml-exploring-previous-runs.ipynb](exploring-previous-runs/auto-ml-exploring-previous-runs.ipynb)
  - List all projects for the workspace
  - List all AutoML Runs for a given project
  - Get details for an AutoML Run. (AutoML settings, run widget & all metrics)
  - List all automated ML Runs for a given project
  - Get details for an automated ML Run. (automated ML settings, run widget & all metrics)
  - Download fitted pipeline for any iteration

- [auto-ml-remote-execution-with-datastore.ipynb](remote-execution-with-datastore/auto-ml-remote-execution-with-datastore.ipynb)
@@ -158,7 +162,7 @@ jupyter notebook

- [auto-ml-classification-with-deployment.ipynb](classification-with-deployment/auto-ml-classification-with-deployment.ipynb)
  - Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
  - Simple example of using Auto ML for classification
  - Simple example of using automated ML for classification
  - Registering the model
  - Creating Image and creating aci service
  - Testing the aci service
@@ -178,20 +182,20 @@ jupyter notebook

- [auto-ml-classification-with-whitelisting.ipynb](classification-with-whitelisting/auto-ml-classification-with-whitelisting.ipynb)
  - Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
  - Simple example of using Auto ML for classification with whitelisting tensorflow models.
  - Simple example of using automated ML for classification with whitelisting tensorflow models.
  - Uses local compute for training

- [auto-ml-forecasting-energy-demand.ipynb](forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb)
  - Dataset: [NYC energy demand data](forecasting-a/nyc_energy.csv)
  - Example of using AutoML for training a forecasting model
  - Example of using automated ML for training a forecasting model

- [auto-ml-forecasting-orange-juice-sales.ipynb](forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb)
  - Dataset: [Dominick's grocery sales of orange juice](forecasting-b/dominicks_OJ.csv)
  - Example of training an AutoML forecasting model on multiple time-series
  - Example of training an automated ML forecasting model on multiple time-series

- [auto-ml-classification-with-onnx.ipynb](classification-with-onnx/auto-ml-classification-with-onnx.ipynb)
  - Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
  - Simple example of using Auto ML for classification with ONNX models
  - Simple example of using automated ML for classification with ONNX models
  - Uses local compute for training

<a name="documentation"></a>
@@ -259,7 +263,7 @@ There are several reasons why the DsvmCompute.create can fail. The reason is us
2) `The requested VM size xxxxx is not available in the current region.` You can select a different region or vm_size.

## Remote run: Unable to establish SSH connection
AutoML uses the SSH protocol to communicate with remote DSVMs. This defaults to port 22. Possible causes for this error are:
Automated ML uses the SSH protocol to communicate with remote DSVMs. This defaults to port 22. Possible causes for this error are:
1) The DSVM is not ready for SSH connections. When DSVM creation completes, the DSVM might still not be ready to accept SSH connections. The sample notebooks have a one minute delay to allow for this.
2) Your Azure Subscription may restrict the IP address ranges that can access the DSVM on port 22. You can check this in the Azure Portal by selecting the Virtual Machine and then clicking Networking. The Virtual Machine name is the name that you provided in the notebook plus 10 alphanumeric characters to make the name unique. The Inbound Port Rules define what can access the VM on specific ports. Note that there is a priority order, so a Deny entry with a low priority number will override an Allow entry with a higher priority number. A quick connectivity test is sketched below.
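Before digging into the port rules, a quick reachability check from your client machine can rule out cause 1; a sketch with a hypothetical host name:

```sh
# exits quickly with success if port 22 on the DSVM accepts connections
nc -vz my-dsvm.eastus.cloudapp.azure.com 22
```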
@@ -270,13 +274,13 @@ This is often an issue with the `get_data` method.
3) You can get to the error log for the setup iteration by clicking the `Click here to see the run in Azure portal` link, click `Back to Experiment`, click on the highest run number and then click on Logs.
## Remote run: disk full
AutoML creates files under /tmp/azureml_runs for each iteration that it runs. It creates a folder with the iteration id. For example: AutoML_9a038a18-77cc-48f1-80fb-65abdbc33abe_93. Under this, there is an azureml-logs folder, which contains logs. If you run too many iterations on the same DSVM, these files can fill the disk.
Automated ML creates files under /tmp/azureml_runs for each iteration that it runs. It creates a folder with the iteration id. For example: AutoML_9a038a18-77cc-48f1-80fb-65abdbc33abe_93. Under this, there is an azureml-logs folder, which contains logs. If you run too many iterations on the same DSVM, these files can fill the disk.
You can delete the files under /tmp/azureml_runs or just delete the VM and create a new one.
If your get_data downloads files, make sure to delete them or they can use disk space as well.
When using DataStore, it is good to specify an absolute path for the files so that they are downloaded just once. If you specify a relative path, it will download a file for each iteration.
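A sketch of reclaiming that space on the DSVM, assuming no run still in progress needs the files:

```sh
# removes per-iteration folders such as AutoML_<run-id>_<iteration>
rm -rf /tmp/azureml_runs/*
```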
## Remote run: Iterations fail and the log contains "MemoryError"
This can be caused by insufficient memory on the DSVM. AutoML loads all training data into memory. So, the available memory should be more than the training data size.
This can be caused by insufficient memory on the DSVM. Automated ML loads all training data into memory. So, the available memory should be more than the training data size.
If you are using a remote DSVM, memory is needed for each concurrent iteration. The max_concurrent_iterations setting specifies the maximum concurrent iterations. For example, if the training data size is 8Gb and max_concurrent_iterations is set to 10, the minimum memory required is at least 80Gb.
To resolve this issue, allocate a DSVM with more memory or reduce the value specified for max_concurrent_iterations.
@@ -1,22 +1,21 @@
name: azure_automl
dependencies:
  # The python interpreter version.
  # Currently Azure ML only supports 3.5.2 and later.
  - python>=3.5.2,<3.6.8
  - nb_conda
  - matplotlib==2.1.0
  - numpy>=1.11.0,<1.15.0
  - cython
  - urllib3<1.24
  - scipy>=1.0.0,<=1.1.0
  - scikit-learn>=0.18.0,<=0.19.1
  - pandas>=0.22.0,<0.23.0
  - tensorflow>=1.12.0
  - py-xgboost<=0.80

  - pip:
    # Required packages for AzureML execution, history, and data preparation.
    - azureml-sdk[automl,explain]
    - azureml-widgets
    - pandas_ml

name: azure_automl
dependencies:
  # The python interpreter version.
  # Currently Azure ML only supports 3.5.2 and later.
  - python>=3.5.2,<3.6.8
  - nb_conda
  - matplotlib==2.1.0
  - numpy>=1.11.0,<=1.16.2
  - cython
  - urllib3<1.24
  - scipy>=1.0.0,<=1.1.0
  - scikit-learn>=0.19.0,<=0.20.3
  - pandas>=0.22.0,<0.23.0
  - py-xgboost<=0.80

  - pip:
    # Required packages for AzureML execution, history, and data preparation.
    - azureml-sdk[automl,explain]
    - azureml-widgets
    - pandas_ml

@@ -1,23 +0,0 @@
name: azure_automl
dependencies:
  # The python interpreter version.
  # Currently Azure ML only supports 3.5.2 and later.
  - python>=3.5.2,<3.6.8
  - nb_conda
  - matplotlib==2.1.0
  - numpy>=1.15.3
  - cython
  - urllib3<1.24
  - scipy>=1.0.0,<=1.1.0
  - scikit-learn>=0.18.0,<=0.19.1
  - pandas>=0.22.0,<0.23.0
  - tensorflow>=1.12.0
  - py-xgboost<=0.80

  - pip:
    # Required packages for AzureML execution, history, and data preparation.
    - azureml-sdk[automl,explain]
    - azureml-widgets
    - pandas_ml
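If you prefer not to use the setup scripts below, the environment file can also be applied directly with standard conda commands; a minimal sketch:

```sh
conda env create -f automl_env.yml -n azure_automl
conda activate azure_automl
```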
@@ -1,51 +1,51 @@
@echo off
set conda_env_name=%1
set automl_env_file=%2
set options=%3
set PIP_NO_WARN_SCRIPT_LOCATION=0

IF "%conda_env_name%"=="" SET conda_env_name="azure_automl"
IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml"

IF NOT EXIST %automl_env_file% GOTO YmlMissing

call conda activate %conda_env_name% 2>nul:

if not errorlevel 1 (
  echo Upgrading azureml-sdk[automl,notebooks,explain] in existing conda environment %conda_env_name%
  call pip install --upgrade azureml-sdk[automl,notebooks,explain]
  if errorlevel 1 goto ErrorExit
) else (
  call conda env create -f %automl_env_file% -n %conda_env_name%
)

call conda activate %conda_env_name% 2>nul:
if errorlevel 1 goto ErrorExit

call python -m ipykernel install --user --name %conda_env_name% --display-name "Python (%conda_env_name%)"

REM azureml.widgets is now installed as part of the pip install under the conda env.
REM Removing the old user install so that the notebooks will use the latest widget.
call jupyter nbextension uninstall --user --py azureml.widgets

echo.
echo.
echo ***************************************
echo * AutoML setup completed successfully *
echo ***************************************
IF NOT "%options%"=="nolaunch" (
  echo.
  echo Starting jupyter notebook - please run the configuration notebook
  echo.
  jupyter notebook --log-level=50 --notebook-dir='..\..'
)

goto End

:YmlMissing
echo File %automl_env_file% not found.

:ErrorExit
echo Install failed

:End
@@ -12,7 +12,7 @@ fi

if [ "$AUTOML_ENV_FILE" == "" ]
then
  AUTOML_ENV_FILE="automl_env_mac.yml"
  AUTOML_ENV_FILE="automl_env.yml"
fi

if [ ! -f $AUTOML_ENV_FILE ]; then
@@ -31,7 +31,6 @@ else
  conda install lightgbm -c conda-forge -y &&
  python -m ipykernel install --user --name $CONDA_ENV_NAME --display-name "Python ($CONDA_ENV_NAME)" &&
  jupyter nbextension uninstall --user --py azureml.widgets &&
  pip install numpy==1.15.3 &&
  echo "" &&
  echo "" &&
  echo "***************************************" &&
@@ -162,7 +162,14 @@
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
"|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|<i>Exit Criteria [optional]</i><br><br>iterations<br>experiment_timeout_minutes|An optional duration parameter that says how long AutoML should be run.<br>This could be either number of iterations or number of minutes AutoML is allowed to run. <br><br><i>iterations</i> number of algorithm iterations to run<br><i>experiment_timeout_minutes</i> is the number of minutes that AutoML should run<br><br>By default, this is set to stop whenever AutoML determines that progress in scores is not being made|"
"|\n",
"\n",
"Automated machine learning trains multiple machine learning pipelines. Each pipeline's training is known as an iteration.\n",
"* You can specify a maximum number of iterations using the `iterations` parameter.\n",
"* You can specify a maximum time for the run using the `experiment_timeout_minutes` parameter.\n",
"* If you specify neither the `iterations` nor the `experiment_timeout_minutes`, automated ML keeps running iterations while it continues to see improvements in the scores.\n",
"\n",
"The following example doesn't specify `iterations` or `experiment_timeout_minutes` and so runs until the scores stop improving.\n"
]
},
{
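As a sketch of the two exit criteria described above (assuming `X_train` and `y_train` are already defined, and using the SDK version pinned by this repo):

```python
from azureml.train.automl import AutoMLConfig

# cap the model search by pipeline count and/or wall-clock time;
# omit both to run until scores stop improving
automl_config = AutoMLConfig(task='classification',
                             X=X_train,
                             y=y_train,
                             n_cross_validations=3,
                             iterations=20,
                             experiment_timeout_minutes=15)
```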
@@ -0,0 +1,493 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"**BikeShare Demand Forecasting**\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Data](#Data)\n",
"1. [Train](#Train)\n",
"1. [Evaluate](#Evaluate)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"In this example, we show how AutoML can be used for bike share forecasting.\n",
"\n",
"The purpose is to demonstrate how to take advantage of the built-in holiday featurization, access the feature names, and further demonstrate how to work with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"In this notebook you will see\n",
"1. Creating an Experiment in an existing Workspace\n",
"2. Instantiating AutoMLConfig with new task type \"forecasting\" for timeseries data training, and other timeseries related settings: for this dataset we use the basic one: \"time_column_name\" \n",
"3. Training the Model using local compute\n",
"4. Exploring the results\n",
"5. Viewing the engineered names for featurized data and featurization summary for all raw features\n",
"6. Testing the fitted model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"import pandas as pd\n",
"import numpy as np\n",
"import logging\n",
"import warnings\n",
"# Squash warning messages for cleaner output in the notebook\n",
"warnings.showwarning = lambda *args, **kwargs: None\n",
"\n",
"\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.train.automl import AutoMLConfig\n",
"from matplotlib import pyplot as plt\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created a <b>Workspace</b>. For AutoML you will need to create an <b>Experiment</b>. An <b>Experiment</b> is a named object in a <b>Workspace</b>, which is used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-bikeshareforecasting'\n",
"# project folder\n",
"project_folder = './sample_projects/automl-local-bikeshareforecasting'\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Project Directory'] = project_folder\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data\n",
"Read bike share demand data from file, and preview data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data = pd.read_csv('bike-no.csv', parse_dates=['date'])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's set up what we know about the dataset. \n",
"\n",
"**Target column** is what we want to forecast.\n",
"\n",
"**Time column** is the time axis along which to predict.\n",
"\n",
"**Grain** is another word for an individual time series in your dataset. Grains are identified by values of the columns listed in `grain_column_names`, for example \"store\" and \"item\" if your data has multiple time series of sales, one series for each combination of store and item sold.\n",
"\n",
"This dataset has only one time series. Please see the [orange juice notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales) for an example of a multi-time series dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"target_column_name = 'cnt'\n",
"time_column_name = 'date'\n",
"grain_column_names = []"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Split the data\n",
"\n",
"The first split we make is into train and test sets. Note we are splitting on time."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train = data[data[time_column_name] < '2012-09-01']\n",
"test = data[data[time_column_name] >= '2012-09-01']\n",
"\n",
"X_train = train.copy()\n",
"y_train = X_train.pop(target_column_name).values\n",
"\n",
"X_test = test.copy()\n",
"y_test = X_test.pop(target_column_name).values\n",
"\n",
"print(X_train.shape)\n",
"print(y_train.shape)\n",
"print(X_test.shape)\n",
"print(y_test.shape)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Setting forecaster maximum horizon \n",
"\n",
"Assuming your test data forms a full and regular time series (regular time intervals and no holes), \n",
"the maximum horizon you will need to forecast is the length of the longest grain in your test set."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if len(grain_column_names) == 0:\n",
"    max_horizon = len(X_test)\n",
"else:\n",
"    max_horizon = X_test.groupby(grain_column_names)[time_column_name].count().max()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"Instantiate an AutoMLConfig object. This defines the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|forecasting|\n",
"|**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>\n",
"|**iterations**|Number of iterations. In each iteration, Auto ML trains a specific pipeline on the given data|\n",
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
"|**y**|(sparse) array-like, shape = [n_samples, ], target values.|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**country**|The country used to generate holiday features. These should be ISO 3166 two-letter country codes (e.g. 'US', 'GB').|\n",
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"time_column_name = 'date'\n",
"automl_settings = {\n",
"    \"time_column_name\": time_column_name,\n",
"    # these columns are a breakdown of the total and therefore a leak\n",
"    \"drop_column_names\": ['casual', 'registered'],\n",
"    # knowing the country allows Automated ML to bring in holidays\n",
"    \"country\" : 'US',\n",
"    \"max_horizon\" : max_horizon,\n",
"    \"target_lags\": 1\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(task = 'forecasting', \n",
"                             primary_metric='normalized_root_mean_squared_error',\n",
"                             iterations = 10,\n",
"                             iteration_timeout_minutes = 5,\n",
"                             X = X_train,\n",
"                             y = y_train,\n",
"                             n_cross_validations = 3, \n",
"                             path=project_folder,\n",
"                             verbosity = logging.INFO,\n",
"                             **automl_settings)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We will now run the experiment, starting with 10 iterations of model search. The experiment can be continued for more iterations if the results are not yet good. You will see the currently running iterations printing to the console."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run = experiment.submit(automl_config, show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Displaying the run objects gives you links to the visual tools in the Azure Portal. Go try them!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Model\n",
"Below we select the best pipeline from our iterations. The get_output method on automl_classifier returns the best run and the fitted model for the last fit invocation. There are overloads on get_output that allow you to retrieve the best run and fitted model for any logged metric or a particular iteration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = local_run.get_output()\n",
"fitted_model.steps"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### View the engineered names for featurized data\n",
"\n",
"You can access the engineered feature names generated in time-series featurization. Note that a number of named holiday periods are represented. We recommend that you have at least one year of data when using this feature to ensure that all yearly holidays are captured in the training featurization."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### View the featurization summary\n",
"\n",
"You can also see what featurization steps were performed on different raw features in the user data. For each raw feature in the user data, the following information is displayed:\n",
"\n",
"- Raw feature name\n",
"- Number of engineered features formed out of this raw feature\n",
"- Type detected\n",
"- If feature was dropped\n",
"- List of feature transformations for the raw feature"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Test the Best Fitted Model\n",
"\n",
"Predict on training and test set, and calculate residual values.\n",
"\n",
"We always score on the original dataset whose schema matches the schema of the training dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_test.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"y_query = y_test.copy().astype(np.float)\n",
"y_query.fill(np.NaN)\n",
"y_fcst, X_trans = fitted_model.forecast(X_test, y_query)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"It is a good practice to always align the output explicitly to the input, as the count and order of the rows may have changed during transformations that span multiple rows."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def align_outputs(y_predicted, X_trans, X_test, y_test, predicted_column_name = 'predicted'):\n",
"    \"\"\"\n",
"    Demonstrates how to get the output aligned to the inputs\n",
"    using pandas indexes. Helps understand what happened if\n",
"    the output's shape differs from the input shape, or if\n",
"    the data got re-sorted by time and grain during forecasting.\n",
"    \n",
"    Typical causes of misalignment are:\n",
"    * we predicted some periods that were missing in actuals -> drop from eval\n",
"    * model was asked to predict past max_horizon -> increase max horizon\n",
"    * data at start of X_test was needed for lags -> provide previous periods\n",
"    \"\"\"\n",
"    df_fcst = pd.DataFrame({predicted_column_name : y_predicted})\n",
"    # y and X outputs are aligned by forecast() function contract\n",
"    df_fcst.index = X_trans.index\n",
"    \n",
"    # align original X_test to y_test \n",
"    X_test_full = X_test.copy()\n",
"    X_test_full[target_column_name] = y_test\n",
"\n",
"    # X_test_full's index does not include origin, so reset for merge\n",
"    df_fcst.reset_index(inplace=True)\n",
"    X_test_full = X_test_full.reset_index().drop(columns='index')\n",
"    together = df_fcst.merge(X_test_full, how='right')\n",
"    \n",
"    # drop rows where prediction or actuals are nan \n",
"    # happens because of missing actuals \n",
"    # or at edges of time due to lags/rolling windows\n",
"    clean = together[together[[target_column_name, predicted_column_name]].notnull().all(axis=1)]\n",
"    return(clean)\n",
"\n",
"df_all = align_outputs(y_fcst, X_trans, X_test, y_test)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def MAPE(actual, pred):\n",
"    \"\"\"\n",
"    Calculate mean absolute percentage error.\n",
"    Remove NA and values where actual is close to zero\n",
"    \"\"\"\n",
"    not_na = ~(np.isnan(actual) | np.isnan(pred))\n",
"    not_zero = ~np.isclose(actual, 0.0)\n",
"    actual_safe = actual[not_na & not_zero]\n",
"    pred_safe = pred[not_na & not_zero]\n",
"    APE = 100*np.abs((actual_safe - pred_safe)/actual_safe)\n",
"    return np.mean(APE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Simple forecasting model\")\n",
"rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all['predicted']))\n",
"print(\"[Test Data] \\nRoot Mean squared error: %.2f\" % rmse)\n",
"mae = mean_absolute_error(df_all[target_column_name], df_all['predicted'])\n",
"print('mean_absolute_error score: %.2f' % mae)\n",
"print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted']))\n",
"\n",
"# Plot outputs\n",
"%matplotlib notebook\n",
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
"test_test = plt.scatter(y_test, y_test, color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"plt.show()"
]
}
],
"metadata": {
"authors": [
{
"name": "xiaga@microsoft.com, tosingli@microsoft.com"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,732 @@
instant,date,season,yr,mnth,weekday,weathersit,temp,atemp,hum,windspeed,casual,registered,cnt
1,1/1/2011,1,0,1,6,2,0.344167,0.363625,0.805833,0.160446,331,654,985
2,1/2/2011,1,0,1,0,2,0.363478,0.353739,0.696087,0.248539,131,670,801
3,1/3/2011,1,0,1,1,1,0.196364,0.189405,0.437273,0.248309,120,1229,1349
4,1/4/2011,1,0,1,2,1,0.2,0.212122,0.590435,0.160296,108,1454,1562
5,1/5/2011,1,0,1,3,1,0.226957,0.22927,0.436957,0.1869,82,1518,1600
6,1/6/2011,1,0,1,4,1,0.204348,0.233209,0.518261,0.0895652,88,1518,1606
7,1/7/2011,1,0,1,5,2,0.196522,0.208839,0.498696,0.168726,148,1362,1510
8,1/8/2011,1,0,1,6,2,0.165,0.162254,0.535833,0.266804,68,891,959
9,1/9/2011,1,0,1,0,1,0.138333,0.116175,0.434167,0.36195,54,768,822
10,1/10/2011,1,0,1,1,1,0.150833,0.150888,0.482917,0.223267,41,1280,1321
11,1/11/2011,1,0,1,2,2,0.169091,0.191464,0.686364,0.122132,43,1220,1263
12,1/12/2011,1,0,1,3,1,0.172727,0.160473,0.599545,0.304627,25,1137,1162
13,1/13/2011,1,0,1,4,1,0.165,0.150883,0.470417,0.301,38,1368,1406
14,1/14/2011,1,0,1,5,1,0.16087,0.188413,0.537826,0.126548,54,1367,1421
15,1/15/2011,1,0,1,6,2,0.233333,0.248112,0.49875,0.157963,222,1026,1248
16,1/16/2011,1,0,1,0,1,0.231667,0.234217,0.48375,0.188433,251,953,1204
17,1/17/2011,1,0,1,1,2,0.175833,0.176771,0.5375,0.194017,117,883,1000
18,1/18/2011,1,0,1,2,2,0.216667,0.232333,0.861667,0.146775,9,674,683
19,1/19/2011,1,0,1,3,2,0.292174,0.298422,0.741739,0.208317,78,1572,1650
20,1/20/2011,1,0,1,4,2,0.261667,0.25505,0.538333,0.195904,83,1844,1927
21,1/21/2011,1,0,1,5,1,0.1775,0.157833,0.457083,0.353242,75,1468,1543
22,1/22/2011,1,0,1,6,1,0.0591304,0.0790696,0.4,0.17197,93,888,981
23,1/23/2011,1,0,1,0,1,0.0965217,0.0988391,0.436522,0.2466,150,836,986
24,1/24/2011,1,0,1,1,1,0.0973913,0.11793,0.491739,0.15833,86,1330,1416
25,1/25/2011,1,0,1,2,2,0.223478,0.234526,0.616957,0.129796,186,1799,1985
26,1/26/2011,1,0,1,3,3,0.2175,0.2036,0.8625,0.29385,34,472,506
27,1/27/2011,1,0,1,4,1,0.195,0.2197,0.6875,0.113837,15,416,431
28,1/28/2011,1,0,1,5,2,0.203478,0.223317,0.793043,0.1233,38,1129,1167
29,1/29/2011,1,0,1,6,1,0.196522,0.212126,0.651739,0.145365,123,975,1098
30,1/30/2011,1,0,1,0,1,0.216522,0.250322,0.722174,0.0739826,140,956,1096
31,1/31/2011,1,0,1,1,2,0.180833,0.18625,0.60375,0.187192,42,1459,1501
32,2/1/2011,1,0,2,2,2,0.192174,0.23453,0.829565,0.053213,47,1313,1360
33,2/2/2011,1,0,2,3,2,0.26,0.254417,0.775417,0.264308,72,1454,1526
34,2/3/2011,1,0,2,4,1,0.186957,0.177878,0.437826,0.277752,61,1489,1550
35,2/4/2011,1,0,2,5,2,0.211304,0.228587,0.585217,0.127839,88,1620,1708
36,2/5/2011,1,0,2,6,2,0.233333,0.243058,0.929167,0.161079,100,905,1005
37,2/6/2011,1,0,2,0,1,0.285833,0.291671,0.568333,0.1418,354,1269,1623
38,2/7/2011,1,0,2,1,1,0.271667,0.303658,0.738333,0.0454083,120,1592,1712
39,2/8/2011,1,0,2,2,1,0.220833,0.198246,0.537917,0.36195,64,1466,1530
40,2/9/2011,1,0,2,3,2,0.134783,0.144283,0.494783,0.188839,53,1552,1605
41,2/10/2011,1,0,2,4,1,0.144348,0.149548,0.437391,0.221935,47,1491,1538
42,2/11/2011,1,0,2,5,1,0.189091,0.213509,0.506364,0.10855,149,1597,1746
43,2/12/2011,1,0,2,6,1,0.2225,0.232954,0.544167,0.203367,288,1184,1472
44,2/13/2011,1,0,2,0,1,0.316522,0.324113,0.457391,0.260883,397,1192,1589
45,2/14/2011,1,0,2,1,1,0.415,0.39835,0.375833,0.417908,208,1705,1913
46,2/15/2011,1,0,2,2,1,0.266087,0.254274,0.314348,0.291374,140,1675,1815
47,2/16/2011,1,0,2,3,1,0.318261,0.3162,0.423478,0.251791,218,1897,2115
48,2/17/2011,1,0,2,4,1,0.435833,0.428658,0.505,0.230104,259,2216,2475
49,2/18/2011,1,0,2,5,1,0.521667,0.511983,0.516667,0.264925,579,2348,2927
50,2/19/2011,1,0,2,6,1,0.399167,0.391404,0.187917,0.507463,532,1103,1635
51,2/20/2011,1,0,2,0,1,0.285217,0.27733,0.407826,0.223235,639,1173,1812
52,2/21/2011,1,0,2,1,2,0.303333,0.284075,0.605,0.307846,195,912,1107
53,2/22/2011,1,0,2,2,1,0.182222,0.186033,0.577778,0.195683,74,1376,1450
54,2/23/2011,1,0,2,3,1,0.221739,0.245717,0.423043,0.094113,139,1778,1917
55,2/24/2011,1,0,2,4,2,0.295652,0.289191,0.697391,0.250496,100,1707,1807
56,2/25/2011,1,0,2,5,2,0.364348,0.350461,0.712174,0.346539,120,1341,1461
57,2/26/2011,1,0,2,6,1,0.2825,0.282192,0.537917,0.186571,424,1545,1969
58,2/27/2011,1,0,2,0,1,0.343478,0.351109,0.68,0.125248,694,1708,2402
59,2/28/2011,1,0,2,1,2,0.407273,0.400118,0.876364,0.289686,81,1365,1446
60,3/1/2011,1,0,3,2,1,0.266667,0.263879,0.535,0.216425,137,1714,1851
61,3/2/2011,1,0,3,3,1,0.335,0.320071,0.449583,0.307833,231,1903,2134
62,3/3/2011,1,0,3,4,1,0.198333,0.200133,0.318333,0.225754,123,1562,1685
63,3/4/2011,1,0,3,5,2,0.261667,0.255679,0.610417,0.203346,214,1730,1944
64,3/5/2011,1,0,3,6,2,0.384167,0.378779,0.789167,0.251871,640,1437,2077
65,3/6/2011,1,0,3,0,2,0.376522,0.366252,0.948261,0.343287,114,491,605
66,3/7/2011,1,0,3,1,1,0.261739,0.238461,0.551304,0.341352,244,1628,1872
67,3/8/2011,1,0,3,2,1,0.2925,0.3024,0.420833,0.12065,316,1817,2133
68,3/9/2011,1,0,3,3,2,0.295833,0.286608,0.775417,0.22015,191,1700,1891
69,3/10/2011,1,0,3,4,3,0.389091,0.385668,0,0.261877,46,577,623
70,3/11/2011,1,0,3,5,2,0.316522,0.305,0.649565,0.23297,247,1730,1977
71,3/12/2011,1,0,3,6,1,0.329167,0.32575,0.594583,0.220775,724,1408,2132
72,3/13/2011,1,0,3,0,1,0.384348,0.380091,0.527391,0.270604,982,1435,2417
73,3/14/2011,1,0,3,1,1,0.325217,0.332,0.496957,0.136926,359,1687,2046
74,3/15/2011,1,0,3,2,2,0.317391,0.318178,0.655652,0.184309,289,1767,2056
75,3/16/2011,1,0,3,3,2,0.365217,0.36693,0.776522,0.203117,321,1871,2192
76,3/17/2011,1,0,3,4,1,0.415,0.410333,0.602917,0.209579,424,2320,2744
77,3/18/2011,1,0,3,5,1,0.54,0.527009,0.525217,0.231017,884,2355,3239
78,3/19/2011,1,0,3,6,1,0.4725,0.466525,0.379167,0.368167,1424,1693,3117
79,3/20/2011,1,0,3,0,1,0.3325,0.32575,0.47375,0.207721,1047,1424,2471
80,3/21/2011,2,0,3,1,2,0.430435,0.409735,0.737391,0.288783,401,1676,2077
81,3/22/2011,2,0,3,2,1,0.441667,0.440642,0.624583,0.22575,460,2243,2703
82,3/23/2011,2,0,3,3,2,0.346957,0.337939,0.839565,0.234261,203,1918,2121
83,3/24/2011,2,0,3,4,2,0.285,0.270833,0.805833,0.243787,166,1699,1865
84,3/25/2011,2,0,3,5,1,0.264167,0.256312,0.495,0.230725,300,1910,2210
85,3/26/2011,2,0,3,6,1,0.265833,0.257571,0.394167,0.209571,981,1515,2496
86,3/27/2011,2,0,3,0,2,0.253043,0.250339,0.493913,0.1843,472,1221,1693
87,3/28/2011,2,0,3,1,1,0.264348,0.257574,0.302174,0.212204,222,1806,2028
88,3/29/2011,2,0,3,2,1,0.3025,0.292908,0.314167,0.226996,317,2108,2425
89,3/30/2011,2,0,3,3,2,0.3,0.29735,0.646667,0.172888,168,1368,1536
90,3/31/2011,2,0,3,4,3,0.268333,0.257575,0.918333,0.217646,179,1506,1685
91,4/1/2011,2,0,4,5,2,0.3,0.283454,0.68625,0.258708,307,1920,2227
92,4/2/2011,2,0,4,6,2,0.315,0.315637,0.65375,0.197146,898,1354,2252
93,4/3/2011,2,0,4,0,1,0.378333,0.378767,0.48,0.182213,1651,1598,3249
94,4/4/2011,2,0,4,1,1,0.573333,0.542929,0.42625,0.385571,734,2381,3115
95,4/5/2011,2,0,4,2,2,0.414167,0.39835,0.642083,0.388067,167,1628,1795
96,4/6/2011,2,0,4,3,1,0.390833,0.387608,0.470833,0.263063,413,2395,2808
97,4/7/2011,2,0,4,4,1,0.4375,0.433696,0.602917,0.162312,571,2570,3141
98,4/8/2011,2,0,4,5,2,0.335833,0.324479,0.83625,0.226992,172,1299,1471
99,4/9/2011,2,0,4,6,2,0.3425,0.341529,0.8775,0.133083,879,1576,2455
100,4/10/2011,2,0,4,0,2,0.426667,0.426737,0.8575,0.146767,1188,1707,2895
101,4/11/2011,2,0,4,1,2,0.595652,0.565217,0.716956,0.324474,855,2493,3348
102,4/12/2011,2,0,4,2,2,0.5025,0.493054,0.739167,0.274879,257,1777,2034
|
||||
103,4/13/2011,2,0,4,3,2,0.4125,0.417283,0.819167,0.250617,209,1953,2162
|
||||
104,4/14/2011,2,0,4,4,1,0.4675,0.462742,0.540417,0.1107,529,2738,3267
|
||||
105,4/15/2011,2,0,4,5,1,0.446667,0.441913,0.67125,0.226375,642,2484,3126
|
||||
106,4/16/2011,2,0,4,6,3,0.430833,0.425492,0.888333,0.340808,121,674,795
|
||||
107,4/17/2011,2,0,4,0,1,0.456667,0.445696,0.479583,0.303496,1558,2186,3744
|
||||
108,4/18/2011,2,0,4,1,1,0.5125,0.503146,0.5425,0.163567,669,2760,3429
|
||||
109,4/19/2011,2,0,4,2,2,0.505833,0.489258,0.665833,0.157971,409,2795,3204
|
||||
110,4/20/2011,2,0,4,3,1,0.595,0.564392,0.614167,0.241925,613,3331,3944
|
||||
111,4/21/2011,2,0,4,4,1,0.459167,0.453892,0.407083,0.325258,745,3444,4189
|
||||
112,4/22/2011,2,0,4,5,2,0.336667,0.321954,0.729583,0.219521,177,1506,1683
|
||||
113,4/23/2011,2,0,4,6,2,0.46,0.450121,0.887917,0.230725,1462,2574,4036
|
||||
114,4/24/2011,2,0,4,0,2,0.581667,0.551763,0.810833,0.192175,1710,2481,4191
|
||||
115,4/25/2011,2,0,4,1,1,0.606667,0.5745,0.776667,0.185333,773,3300,4073
|
||||
116,4/26/2011,2,0,4,2,1,0.631667,0.594083,0.729167,0.3265,678,3722,4400
|
||||
117,4/27/2011,2,0,4,3,2,0.62,0.575142,0.835417,0.3122,547,3325,3872
|
||||
118,4/28/2011,2,0,4,4,2,0.6175,0.578929,0.700833,0.320908,569,3489,4058
|
||||
119,4/29/2011,2,0,4,5,1,0.51,0.497463,0.457083,0.240063,878,3717,4595
|
||||
120,4/30/2011,2,0,4,6,1,0.4725,0.464021,0.503333,0.235075,1965,3347,5312
|
||||
121,5/1/2011,2,0,5,0,2,0.451667,0.448204,0.762083,0.106354,1138,2213,3351
|
||||
122,5/2/2011,2,0,5,1,2,0.549167,0.532833,0.73,0.183454,847,3554,4401
|
||||
123,5/3/2011,2,0,5,2,2,0.616667,0.582079,0.697083,0.342667,603,3848,4451
|
||||
124,5/4/2011,2,0,5,3,2,0.414167,0.40465,0.737083,0.328996,255,2378,2633
|
||||
125,5/5/2011,2,0,5,4,1,0.459167,0.441917,0.444167,0.295392,614,3819,4433
|
||||
126,5/6/2011,2,0,5,5,1,0.479167,0.474117,0.59,0.228246,894,3714,4608
|
||||
127,5/7/2011,2,0,5,6,1,0.52,0.512621,0.54125,0.16045,1612,3102,4714
|
||||
128,5/8/2011,2,0,5,0,1,0.528333,0.518933,0.631667,0.0746375,1401,2932,4333
|
||||
129,5/9/2011,2,0,5,1,1,0.5325,0.525246,0.58875,0.176,664,3698,4362
|
||||
130,5/10/2011,2,0,5,2,1,0.5325,0.522721,0.489167,0.115671,694,4109,4803
|
||||
131,5/11/2011,2,0,5,3,1,0.5425,0.5284,0.632917,0.120642,550,3632,4182
|
||||
132,5/12/2011,2,0,5,4,1,0.535,0.523363,0.7475,0.189667,695,4169,4864
|
||||
133,5/13/2011,2,0,5,5,2,0.5125,0.4943,0.863333,0.179725,692,3413,4105
|
||||
134,5/14/2011,2,0,5,6,2,0.520833,0.500629,0.9225,0.13495,902,2507,3409
|
||||
135,5/15/2011,2,0,5,0,2,0.5625,0.536,0.867083,0.152979,1582,2971,4553
|
||||
136,5/16/2011,2,0,5,1,1,0.5775,0.550512,0.787917,0.126871,773,3185,3958
|
||||
137,5/17/2011,2,0,5,2,2,0.561667,0.538529,0.837917,0.277354,678,3445,4123
|
||||
138,5/18/2011,2,0,5,3,2,0.55,0.527158,0.87,0.201492,536,3319,3855
|
||||
139,5/19/2011,2,0,5,4,2,0.530833,0.510742,0.829583,0.108213,735,3840,4575
|
||||
140,5/20/2011,2,0,5,5,1,0.536667,0.529042,0.719583,0.125013,909,4008,4917
|
||||
141,5/21/2011,2,0,5,6,1,0.6025,0.571975,0.626667,0.12065,2258,3547,5805
|
||||
142,5/22/2011,2,0,5,0,1,0.604167,0.5745,0.749583,0.148008,1576,3084,4660
|
||||
143,5/23/2011,2,0,5,1,2,0.631667,0.590296,0.81,0.233842,836,3438,4274
|
||||
144,5/24/2011,2,0,5,2,2,0.66,0.604813,0.740833,0.207092,659,3833,4492
|
||||
145,5/25/2011,2,0,5,3,1,0.660833,0.615542,0.69625,0.154233,740,4238,4978
|
||||
146,5/26/2011,2,0,5,4,1,0.708333,0.654688,0.6775,0.199642,758,3919,4677
|
||||
147,5/27/2011,2,0,5,5,1,0.681667,0.637008,0.65375,0.240679,871,3808,4679
|
||||
148,5/28/2011,2,0,5,6,1,0.655833,0.612379,0.729583,0.230092,2001,2757,4758
|
||||
149,5/29/2011,2,0,5,0,1,0.6675,0.61555,0.81875,0.213938,2355,2433,4788
|
||||
150,5/30/2011,2,0,5,1,1,0.733333,0.671092,0.685,0.131225,1549,2549,4098
|
||||
151,5/31/2011,2,0,5,2,1,0.775,0.725383,0.636667,0.111329,673,3309,3982
|
||||
152,6/1/2011,2,0,6,3,2,0.764167,0.720967,0.677083,0.207092,513,3461,3974
|
||||
153,6/2/2011,2,0,6,4,1,0.715,0.643942,0.305,0.292287,736,4232,4968
|
||||
154,6/3/2011,2,0,6,5,1,0.62,0.587133,0.354167,0.253121,898,4414,5312
|
||||
155,6/4/2011,2,0,6,6,1,0.635,0.594696,0.45625,0.123142,1869,3473,5342
|
||||
156,6/5/2011,2,0,6,0,2,0.648333,0.616804,0.6525,0.138692,1685,3221,4906
|
||||
157,6/6/2011,2,0,6,1,1,0.678333,0.621858,0.6,0.121896,673,3875,4548
|
||||
158,6/7/2011,2,0,6,2,1,0.7075,0.65595,0.597917,0.187808,763,4070,4833
|
||||
159,6/8/2011,2,0,6,3,1,0.775833,0.727279,0.622083,0.136817,676,3725,4401
|
||||
160,6/9/2011,2,0,6,4,2,0.808333,0.757579,0.568333,0.149883,563,3352,3915
|
||||
161,6/10/2011,2,0,6,5,1,0.755,0.703292,0.605,0.140554,815,3771,4586
|
||||
162,6/11/2011,2,0,6,6,1,0.725,0.678038,0.654583,0.15485,1729,3237,4966
|
||||
163,6/12/2011,2,0,6,0,1,0.6925,0.643325,0.747917,0.163567,1467,2993,4460
|
||||
164,6/13/2011,2,0,6,1,1,0.635,0.601654,0.494583,0.30535,863,4157,5020
|
||||
165,6/14/2011,2,0,6,2,1,0.604167,0.591546,0.507083,0.269283,727,4164,4891
|
||||
166,6/15/2011,2,0,6,3,1,0.626667,0.587754,0.471667,0.167912,769,4411,5180
|
||||
167,6/16/2011,2,0,6,4,2,0.628333,0.595346,0.688333,0.206471,545,3222,3767
|
||||
168,6/17/2011,2,0,6,5,1,0.649167,0.600383,0.735833,0.143029,863,3981,4844
|
||||
169,6/18/2011,2,0,6,6,1,0.696667,0.643954,0.670417,0.119408,1807,3312,5119
|
||||
170,6/19/2011,2,0,6,0,2,0.699167,0.645846,0.666667,0.102,1639,3105,4744
|
||||
171,6/20/2011,2,0,6,1,2,0.635,0.595346,0.74625,0.155475,699,3311,4010
|
||||
172,6/21/2011,3,0,6,2,2,0.680833,0.637646,0.770417,0.171025,774,4061,4835
|
||||
173,6/22/2011,3,0,6,3,1,0.733333,0.693829,0.7075,0.172262,661,3846,4507
|
||||
174,6/23/2011,3,0,6,4,2,0.728333,0.693833,0.703333,0.238804,746,4044,4790
|
||||
175,6/24/2011,3,0,6,5,1,0.724167,0.656583,0.573333,0.222025,969,4022,4991
|
||||
176,6/25/2011,3,0,6,6,1,0.695,0.643313,0.483333,0.209571,1782,3420,5202
|
||||
177,6/26/2011,3,0,6,0,1,0.68,0.637629,0.513333,0.0945333,1920,3385,5305
|
||||
178,6/27/2011,3,0,6,1,2,0.6825,0.637004,0.658333,0.107588,854,3854,4708
|
||||
179,6/28/2011,3,0,6,2,1,0.744167,0.692558,0.634167,0.144283,732,3916,4648
|
||||
180,6/29/2011,3,0,6,3,1,0.728333,0.654688,0.497917,0.261821,848,4377,5225
|
||||
181,6/30/2011,3,0,6,4,1,0.696667,0.637008,0.434167,0.185312,1027,4488,5515
|
||||
182,7/1/2011,3,0,7,5,1,0.7225,0.652162,0.39625,0.102608,1246,4116,5362
|
||||
183,7/2/2011,3,0,7,6,1,0.738333,0.667308,0.444583,0.115062,2204,2915,5119
|
||||
184,7/3/2011,3,0,7,0,2,0.716667,0.668575,0.6825,0.228858,2282,2367,4649
|
||||
185,7/4/2011,3,0,7,1,2,0.726667,0.665417,0.637917,0.0814792,3065,2978,6043
|
||||
186,7/5/2011,3,0,7,2,1,0.746667,0.696338,0.590417,0.126258,1031,3634,4665
|
||||
187,7/6/2011,3,0,7,3,1,0.72,0.685633,0.743333,0.149883,784,3845,4629
|
||||
188,7/7/2011,3,0,7,4,1,0.75,0.686871,0.65125,0.1592,754,3838,4592
|
||||
189,7/8/2011,3,0,7,5,2,0.709167,0.670483,0.757917,0.225129,692,3348,4040
|
||||
190,7/9/2011,3,0,7,6,1,0.733333,0.664158,0.609167,0.167912,1988,3348,5336
|
||||
191,7/10/2011,3,0,7,0,1,0.7475,0.690025,0.578333,0.183471,1743,3138,4881
|
||||
192,7/11/2011,3,0,7,1,1,0.7625,0.729804,0.635833,0.282337,723,3363,4086
|
||||
193,7/12/2011,3,0,7,2,1,0.794167,0.739275,0.559167,0.200254,662,3596,4258
|
||||
194,7/13/2011,3,0,7,3,1,0.746667,0.689404,0.631667,0.146133,748,3594,4342
|
||||
195,7/14/2011,3,0,7,4,1,0.680833,0.635104,0.47625,0.240667,888,4196,5084
|
||||
196,7/15/2011,3,0,7,5,1,0.663333,0.624371,0.59125,0.182833,1318,4220,5538
|
||||
197,7/16/2011,3,0,7,6,1,0.686667,0.638263,0.585,0.208342,2418,3505,5923
|
||||
198,7/17/2011,3,0,7,0,1,0.719167,0.669833,0.604167,0.245033,2006,3296,5302
|
||||
199,7/18/2011,3,0,7,1,1,0.746667,0.703925,0.65125,0.215804,841,3617,4458
|
||||
200,7/19/2011,3,0,7,2,1,0.776667,0.747479,0.650417,0.1306,752,3789,4541
|
||||
201,7/20/2011,3,0,7,3,1,0.768333,0.74685,0.707083,0.113817,644,3688,4332
|
||||
202,7/21/2011,3,0,7,4,2,0.815,0.826371,0.69125,0.222021,632,3152,3784
|
||||
203,7/22/2011,3,0,7,5,1,0.848333,0.840896,0.580417,0.1331,562,2825,3387
|
||||
204,7/23/2011,3,0,7,6,1,0.849167,0.804287,0.5,0.131221,987,2298,3285
|
||||
205,7/24/2011,3,0,7,0,1,0.83,0.794829,0.550833,0.169171,1050,2556,3606
|
||||
206,7/25/2011,3,0,7,1,1,0.743333,0.720958,0.757083,0.0908083,568,3272,3840
|
||||
207,7/26/2011,3,0,7,2,1,0.771667,0.696979,0.540833,0.200258,750,3840,4590
|
||||
208,7/27/2011,3,0,7,3,1,0.775,0.690667,0.402917,0.183463,755,3901,4656
|
||||
209,7/28/2011,3,0,7,4,1,0.779167,0.7399,0.583333,0.178479,606,3784,4390
|
||||
210,7/29/2011,3,0,7,5,1,0.838333,0.785967,0.5425,0.174138,670,3176,3846
|
||||
211,7/30/2011,3,0,7,6,1,0.804167,0.728537,0.465833,0.168537,1559,2916,4475
|
||||
212,7/31/2011,3,0,7,0,1,0.805833,0.729796,0.480833,0.164813,1524,2778,4302
|
||||
213,8/1/2011,3,0,8,1,1,0.771667,0.703292,0.550833,0.156717,729,3537,4266
|
||||
214,8/2/2011,3,0,8,2,1,0.783333,0.707071,0.49125,0.20585,801,4044,4845
|
||||
215,8/3/2011,3,0,8,3,2,0.731667,0.679937,0.6575,0.135583,467,3107,3574
|
||||
216,8/4/2011,3,0,8,4,2,0.71,0.664788,0.7575,0.19715,799,3777,4576
|
||||
217,8/5/2011,3,0,8,5,1,0.710833,0.656567,0.630833,0.184696,1023,3843,4866
|
||||
218,8/6/2011,3,0,8,6,2,0.716667,0.676154,0.755,0.22825,1521,2773,4294
|
||||
219,8/7/2011,3,0,8,0,1,0.7425,0.715292,0.752917,0.201487,1298,2487,3785
|
||||
220,8/8/2011,3,0,8,1,1,0.765,0.703283,0.592083,0.192175,846,3480,4326
|
||||
221,8/9/2011,3,0,8,2,1,0.775,0.724121,0.570417,0.151121,907,3695,4602
|
||||
222,8/10/2011,3,0,8,3,1,0.766667,0.684983,0.424167,0.200258,884,3896,4780
|
||||
223,8/11/2011,3,0,8,4,1,0.7175,0.651521,0.42375,0.164796,812,3980,4792
|
||||
224,8/12/2011,3,0,8,5,1,0.708333,0.654042,0.415,0.125621,1051,3854,4905
|
||||
225,8/13/2011,3,0,8,6,2,0.685833,0.645858,0.729583,0.211454,1504,2646,4150
|
||||
226,8/14/2011,3,0,8,0,2,0.676667,0.624388,0.8175,0.222633,1338,2482,3820
|
||||
227,8/15/2011,3,0,8,1,1,0.665833,0.616167,0.712083,0.208954,775,3563,4338
|
||||
228,8/16/2011,3,0,8,2,1,0.700833,0.645837,0.578333,0.236329,721,4004,4725
|
||||
229,8/17/2011,3,0,8,3,1,0.723333,0.666671,0.575417,0.143667,668,4026,4694
|
||||
230,8/18/2011,3,0,8,4,1,0.711667,0.662258,0.654583,0.233208,639,3166,3805
|
||||
231,8/19/2011,3,0,8,5,2,0.685,0.633221,0.722917,0.139308,797,3356,4153
|
||||
232,8/20/2011,3,0,8,6,1,0.6975,0.648996,0.674167,0.104467,1914,3277,5191
|
||||
233,8/21/2011,3,0,8,0,1,0.710833,0.675525,0.77,0.248754,1249,2624,3873
|
||||
234,8/22/2011,3,0,8,1,1,0.691667,0.638254,0.47,0.27675,833,3925,4758
|
||||
235,8/23/2011,3,0,8,2,1,0.640833,0.606067,0.455417,0.146763,1281,4614,5895
|
||||
236,8/24/2011,3,0,8,3,1,0.673333,0.630692,0.605,0.253108,949,4181,5130
|
||||
237,8/25/2011,3,0,8,4,2,0.684167,0.645854,0.771667,0.210833,435,3107,3542
|
||||
238,8/26/2011,3,0,8,5,1,0.7,0.659733,0.76125,0.0839625,768,3893,4661
|
||||
239,8/27/2011,3,0,8,6,2,0.68,0.635556,0.85,0.375617,226,889,1115
|
||||
240,8/28/2011,3,0,8,0,1,0.707059,0.647959,0.561765,0.304659,1415,2919,4334
|
||||
241,8/29/2011,3,0,8,1,1,0.636667,0.607958,0.554583,0.159825,729,3905,4634
|
||||
242,8/30/2011,3,0,8,2,1,0.639167,0.594704,0.548333,0.125008,775,4429,5204
|
||||
243,8/31/2011,3,0,8,3,1,0.656667,0.611121,0.597917,0.0833333,688,4370,5058
|
||||
244,9/1/2011,3,0,9,4,1,0.655,0.614921,0.639167,0.141796,783,4332,5115
|
||||
245,9/2/2011,3,0,9,5,2,0.643333,0.604808,0.727083,0.139929,875,3852,4727
|
||||
246,9/3/2011,3,0,9,6,1,0.669167,0.633213,0.716667,0.185325,1935,2549,4484
|
||||
247,9/4/2011,3,0,9,0,1,0.709167,0.665429,0.742083,0.206467,2521,2419,4940
|
||||
248,9/5/2011,3,0,9,1,2,0.673333,0.625646,0.790417,0.212696,1236,2115,3351
|
||||
249,9/6/2011,3,0,9,2,3,0.54,0.5152,0.886957,0.343943,204,2506,2710
|
||||
250,9/7/2011,3,0,9,3,3,0.599167,0.544229,0.917083,0.0970208,118,1878,1996
|
||||
251,9/8/2011,3,0,9,4,3,0.633913,0.555361,0.939565,0.192748,153,1689,1842
|
||||
252,9/9/2011,3,0,9,5,2,0.65,0.578946,0.897917,0.124379,417,3127,3544
|
||||
253,9/10/2011,3,0,9,6,1,0.66,0.607962,0.75375,0.153608,1750,3595,5345
|
||||
254,9/11/2011,3,0,9,0,1,0.653333,0.609229,0.71375,0.115054,1633,3413,5046
|
||||
255,9/12/2011,3,0,9,1,1,0.644348,0.60213,0.692174,0.088913,690,4023,4713
|
||||
256,9/13/2011,3,0,9,2,1,0.650833,0.603554,0.7125,0.141804,701,4062,4763
|
||||
257,9/14/2011,3,0,9,3,1,0.673333,0.6269,0.697083,0.1673,647,4138,4785
|
||||
258,9/15/2011,3,0,9,4,2,0.5775,0.553671,0.709167,0.271146,428,3231,3659
|
||||
259,9/16/2011,3,0,9,5,2,0.469167,0.461475,0.590417,0.164183,742,4018,4760
|
||||
260,9/17/2011,3,0,9,6,2,0.491667,0.478512,0.718333,0.189675,1434,3077,4511
|
||||
261,9/18/2011,3,0,9,0,1,0.5075,0.490537,0.695,0.178483,1353,2921,4274
|
||||
262,9/19/2011,3,0,9,1,2,0.549167,0.529675,0.69,0.151742,691,3848,4539
|
||||
263,9/20/2011,3,0,9,2,2,0.561667,0.532217,0.88125,0.134954,438,3203,3641
|
||||
264,9/21/2011,3,0,9,3,2,0.595,0.550533,0.9,0.0964042,539,3813,4352
|
||||
265,9/22/2011,3,0,9,4,2,0.628333,0.554963,0.902083,0.128125,555,4240,4795
|
||||
266,9/23/2011,4,0,9,5,2,0.609167,0.522125,0.9725,0.0783667,258,2137,2395
|
||||
267,9/24/2011,4,0,9,6,2,0.606667,0.564412,0.8625,0.0783833,1776,3647,5423
|
||||
268,9/25/2011,4,0,9,0,2,0.634167,0.572637,0.845,0.0503792,1544,3466,5010
|
||||
269,9/26/2011,4,0,9,1,2,0.649167,0.589042,0.848333,0.1107,684,3946,4630
|
||||
270,9/27/2011,4,0,9,2,2,0.636667,0.574525,0.885417,0.118171,477,3643,4120
|
||||
271,9/28/2011,4,0,9,3,2,0.635,0.575158,0.84875,0.148629,480,3427,3907
|
||||
272,9/29/2011,4,0,9,4,1,0.616667,0.574512,0.699167,0.172883,653,4186,4839
|
||||
273,9/30/2011,4,0,9,5,1,0.564167,0.544829,0.6475,0.206475,830,4372,5202
|
||||
274,10/1/2011,4,0,10,6,2,0.41,0.412863,0.75375,0.292296,480,1949,2429
|
||||
275,10/2/2011,4,0,10,0,2,0.356667,0.345317,0.791667,0.222013,616,2302,2918
|
||||
276,10/3/2011,4,0,10,1,2,0.384167,0.392046,0.760833,0.0833458,330,3240,3570
|
||||
277,10/4/2011,4,0,10,2,1,0.484167,0.472858,0.71,0.205854,486,3970,4456
|
||||
278,10/5/2011,4,0,10,3,1,0.538333,0.527138,0.647917,0.17725,559,4267,4826
|
||||
279,10/6/2011,4,0,10,4,1,0.494167,0.480425,0.620833,0.134954,639,4126,4765
|
||||
280,10/7/2011,4,0,10,5,1,0.510833,0.504404,0.684167,0.0223917,949,4036,4985
|
||||
281,10/8/2011,4,0,10,6,1,0.521667,0.513242,0.70125,0.0454042,2235,3174,5409
|
||||
282,10/9/2011,4,0,10,0,1,0.540833,0.523983,0.7275,0.06345,2397,3114,5511
|
||||
283,10/10/2011,4,0,10,1,1,0.570833,0.542925,0.73375,0.0423042,1514,3603,5117
|
||||
284,10/11/2011,4,0,10,2,2,0.566667,0.546096,0.80875,0.143042,667,3896,4563
|
||||
285,10/12/2011,4,0,10,3,3,0.543333,0.517717,0.90625,0.24815,217,2199,2416
|
||||
286,10/13/2011,4,0,10,4,2,0.589167,0.551804,0.896667,0.141787,290,2623,2913
|
||||
287,10/14/2011,4,0,10,5,2,0.550833,0.529675,0.71625,0.223883,529,3115,3644
|
||||
288,10/15/2011,4,0,10,6,1,0.506667,0.498725,0.483333,0.258083,1899,3318,5217
|
||||
289,10/16/2011,4,0,10,0,1,0.511667,0.503154,0.486667,0.281717,1748,3293,5041
|
||||
290,10/17/2011,4,0,10,1,1,0.534167,0.510725,0.579583,0.175379,713,3857,4570
|
||||
291,10/18/2011,4,0,10,2,2,0.5325,0.522721,0.701667,0.110087,637,4111,4748
|
||||
292,10/19/2011,4,0,10,3,3,0.541739,0.513848,0.895217,0.243339,254,2170,2424
|
||||
293,10/20/2011,4,0,10,4,1,0.475833,0.466525,0.63625,0.422275,471,3724,4195
|
||||
294,10/21/2011,4,0,10,5,1,0.4275,0.423596,0.574167,0.221396,676,3628,4304
|
||||
295,10/22/2011,4,0,10,6,1,0.4225,0.425492,0.629167,0.0926667,1499,2809,4308
|
||||
296,10/23/2011,4,0,10,0,1,0.421667,0.422333,0.74125,0.0995125,1619,2762,4381
|
||||
297,10/24/2011,4,0,10,1,1,0.463333,0.457067,0.772083,0.118792,699,3488,4187
|
||||
298,10/25/2011,4,0,10,2,1,0.471667,0.463375,0.622917,0.166658,695,3992,4687
|
||||
299,10/26/2011,4,0,10,3,2,0.484167,0.472846,0.720417,0.148642,404,3490,3894
|
||||
300,10/27/2011,4,0,10,4,2,0.47,0.457046,0.812917,0.197763,240,2419,2659
|
||||
301,10/28/2011,4,0,10,5,2,0.330833,0.318812,0.585833,0.229479,456,3291,3747
|
||||
302,10/29/2011,4,0,10,6,3,0.254167,0.227913,0.8825,0.351371,57,570,627
|
||||
303,10/30/2011,4,0,10,0,1,0.319167,0.321329,0.62375,0.176617,885,2446,3331
|
||||
304,10/31/2011,4,0,10,1,1,0.34,0.356063,0.703333,0.10635,362,3307,3669
|
||||
305,11/1/2011,4,0,11,2,1,0.400833,0.397088,0.68375,0.135571,410,3658,4068
|
||||
306,11/2/2011,4,0,11,3,1,0.3775,0.390133,0.71875,0.0820917,370,3816,4186
|
||||
307,11/3/2011,4,0,11,4,1,0.408333,0.405921,0.702083,0.136817,318,3656,3974
|
||||
308,11/4/2011,4,0,11,5,2,0.403333,0.403392,0.6225,0.271779,470,3576,4046
|
||||
309,11/5/2011,4,0,11,6,1,0.326667,0.323854,0.519167,0.189062,1156,2770,3926
|
||||
310,11/6/2011,4,0,11,0,1,0.348333,0.362358,0.734583,0.0920542,952,2697,3649
|
||||
311,11/7/2011,4,0,11,1,1,0.395,0.400871,0.75875,0.057225,373,3662,4035
|
||||
312,11/8/2011,4,0,11,2,1,0.408333,0.412246,0.721667,0.0690375,376,3829,4205
|
||||
313,11/9/2011,4,0,11,3,1,0.4,0.409079,0.758333,0.0621958,305,3804,4109
|
||||
314,11/10/2011,4,0,11,4,2,0.38,0.373721,0.813333,0.189067,190,2743,2933
|
||||
315,11/11/2011,4,0,11,5,1,0.324167,0.306817,0.44625,0.314675,440,2928,3368
|
||||
316,11/12/2011,4,0,11,6,1,0.356667,0.357942,0.552917,0.212062,1275,2792,4067
|
||||
317,11/13/2011,4,0,11,0,1,0.440833,0.43055,0.458333,0.281721,1004,2713,3717
|
||||
318,11/14/2011,4,0,11,1,1,0.53,0.524612,0.587083,0.306596,595,3891,4486
|
||||
319,11/15/2011,4,0,11,2,2,0.53,0.507579,0.68875,0.199633,449,3746,4195
|
||||
320,11/16/2011,4,0,11,3,3,0.456667,0.451988,0.93,0.136829,145,1672,1817
|
||||
321,11/17/2011,4,0,11,4,2,0.341667,0.323221,0.575833,0.305362,139,2914,3053
|
||||
322,11/18/2011,4,0,11,5,1,0.274167,0.272721,0.41,0.168533,245,3147,3392
|
||||
323,11/19/2011,4,0,11,6,1,0.329167,0.324483,0.502083,0.224496,943,2720,3663
|
||||
324,11/20/2011,4,0,11,0,2,0.463333,0.457058,0.684583,0.18595,787,2733,3520
|
||||
325,11/21/2011,4,0,11,1,3,0.4475,0.445062,0.91,0.138054,220,2545,2765
|
||||
326,11/22/2011,4,0,11,2,3,0.416667,0.421696,0.9625,0.118792,69,1538,1607
|
||||
327,11/23/2011,4,0,11,3,2,0.440833,0.430537,0.757917,0.335825,112,2454,2566
|
||||
328,11/24/2011,4,0,11,4,1,0.373333,0.372471,0.549167,0.167304,560,935,1495
|
||||
329,11/25/2011,4,0,11,5,1,0.375,0.380671,0.64375,0.0988958,1095,1697,2792
|
||||
330,11/26/2011,4,0,11,6,1,0.375833,0.385087,0.681667,0.0684208,1249,1819,3068
|
||||
331,11/27/2011,4,0,11,0,1,0.459167,0.4558,0.698333,0.208954,810,2261,3071
|
||||
332,11/28/2011,4,0,11,1,1,0.503478,0.490122,0.743043,0.142122,253,3614,3867
|
||||
333,11/29/2011,4,0,11,2,2,0.458333,0.451375,0.830833,0.258092,96,2818,2914
|
||||
334,11/30/2011,4,0,11,3,1,0.325,0.311221,0.613333,0.271158,188,3425,3613
|
||||
335,12/1/2011,4,0,12,4,1,0.3125,0.305554,0.524583,0.220158,182,3545,3727
|
||||
336,12/2/2011,4,0,12,5,1,0.314167,0.331433,0.625833,0.100754,268,3672,3940
|
||||
337,12/3/2011,4,0,12,6,1,0.299167,0.310604,0.612917,0.0957833,706,2908,3614
|
||||
338,12/4/2011,4,0,12,0,1,0.330833,0.3491,0.775833,0.0839583,634,2851,3485
|
||||
339,12/5/2011,4,0,12,1,2,0.385833,0.393925,0.827083,0.0622083,233,3578,3811
|
||||
340,12/6/2011,4,0,12,2,3,0.4625,0.4564,0.949583,0.232583,126,2468,2594
|
||||
341,12/7/2011,4,0,12,3,3,0.41,0.400246,0.970417,0.266175,50,655,705
|
||||
342,12/8/2011,4,0,12,4,1,0.265833,0.256938,0.58,0.240058,150,3172,3322
|
||||
343,12/9/2011,4,0,12,5,1,0.290833,0.317542,0.695833,0.0827167,261,3359,3620
|
||||
344,12/10/2011,4,0,12,6,1,0.275,0.266412,0.5075,0.233221,502,2688,3190
|
||||
345,12/11/2011,4,0,12,0,1,0.220833,0.253154,0.49,0.0665417,377,2366,2743
|
||||
346,12/12/2011,4,0,12,1,1,0.238333,0.270196,0.670833,0.06345,143,3167,3310
|
||||
347,12/13/2011,4,0,12,2,1,0.2825,0.301138,0.59,0.14055,155,3368,3523
|
||||
348,12/14/2011,4,0,12,3,2,0.3175,0.338362,0.66375,0.0609583,178,3562,3740
|
||||
349,12/15/2011,4,0,12,4,2,0.4225,0.412237,0.634167,0.268042,181,3528,3709
|
||||
350,12/16/2011,4,0,12,5,2,0.375,0.359825,0.500417,0.260575,178,3399,3577
|
||||
351,12/17/2011,4,0,12,6,2,0.258333,0.249371,0.560833,0.243167,275,2464,2739
|
||||
352,12/18/2011,4,0,12,0,1,0.238333,0.245579,0.58625,0.169779,220,2211,2431
|
||||
353,12/19/2011,4,0,12,1,1,0.276667,0.280933,0.6375,0.172896,260,3143,3403
|
||||
354,12/20/2011,4,0,12,2,2,0.385833,0.396454,0.595417,0.0615708,216,3534,3750
|
||||
355,12/21/2011,1,0,12,3,2,0.428333,0.428017,0.858333,0.2214,107,2553,2660
|
||||
356,12/22/2011,1,0,12,4,2,0.423333,0.426121,0.7575,0.047275,227,2841,3068
|
||||
357,12/23/2011,1,0,12,5,1,0.373333,0.377513,0.68625,0.274246,163,2046,2209
|
||||
358,12/24/2011,1,0,12,6,1,0.3025,0.299242,0.5425,0.190304,155,856,1011
|
||||
359,12/25/2011,1,0,12,0,1,0.274783,0.279961,0.681304,0.155091,303,451,754
|
||||
360,12/26/2011,1,0,12,1,1,0.321739,0.315535,0.506957,0.239465,430,887,1317
|
||||
361,12/27/2011,1,0,12,2,2,0.325,0.327633,0.7625,0.18845,103,1059,1162
|
||||
362,12/28/2011,1,0,12,3,1,0.29913,0.279974,0.503913,0.293961,255,2047,2302
|
||||
363,12/29/2011,1,0,12,4,1,0.248333,0.263892,0.574167,0.119412,254,2169,2423
|
||||
364,12/30/2011,1,0,12,5,1,0.311667,0.318812,0.636667,0.134337,491,2508,2999
|
||||
365,12/31/2011,1,0,12,6,1,0.41,0.414121,0.615833,0.220154,665,1820,2485
|
||||
366,1/1/2012,1,1,1,0,1,0.37,0.375621,0.6925,0.192167,686,1608,2294
|
||||
367,1/2/2012,1,1,1,1,1,0.273043,0.252304,0.381304,0.329665,244,1707,1951
|
||||
368,1/3/2012,1,1,1,2,1,0.15,0.126275,0.44125,0.365671,89,2147,2236
|
||||
369,1/4/2012,1,1,1,3,2,0.1075,0.119337,0.414583,0.1847,95,2273,2368
|
||||
370,1/5/2012,1,1,1,4,1,0.265833,0.278412,0.524167,0.129987,140,3132,3272
|
||||
371,1/6/2012,1,1,1,5,1,0.334167,0.340267,0.542083,0.167908,307,3791,4098
|
||||
372,1/7/2012,1,1,1,6,1,0.393333,0.390779,0.531667,0.174758,1070,3451,4521
|
||||
373,1/8/2012,1,1,1,0,1,0.3375,0.340258,0.465,0.191542,599,2826,3425
|
||||
374,1/9/2012,1,1,1,1,2,0.224167,0.247479,0.701667,0.0989,106,2270,2376
|
||||
375,1/10/2012,1,1,1,2,1,0.308696,0.318826,0.646522,0.187552,173,3425,3598
|
||||
376,1/11/2012,1,1,1,3,2,0.274167,0.282821,0.8475,0.131221,92,2085,2177
|
||||
377,1/12/2012,1,1,1,4,2,0.3825,0.381938,0.802917,0.180967,269,3828,4097
|
||||
378,1/13/2012,1,1,1,5,1,0.274167,0.249362,0.5075,0.378108,174,3040,3214
|
||||
379,1/14/2012,1,1,1,6,1,0.18,0.183087,0.4575,0.187183,333,2160,2493
|
||||
380,1/15/2012,1,1,1,0,1,0.166667,0.161625,0.419167,0.251258,284,2027,2311
|
||||
381,1/16/2012,1,1,1,1,1,0.19,0.190663,0.5225,0.231358,217,2081,2298
|
||||
382,1/17/2012,1,1,1,2,2,0.373043,0.364278,0.716087,0.34913,127,2808,2935
|
||||
383,1/18/2012,1,1,1,3,1,0.303333,0.275254,0.443333,0.415429,109,3267,3376
|
||||
384,1/19/2012,1,1,1,4,1,0.19,0.190038,0.4975,0.220158,130,3162,3292
|
||||
385,1/20/2012,1,1,1,5,2,0.2175,0.220958,0.45,0.20275,115,3048,3163
|
||||
386,1/21/2012,1,1,1,6,2,0.173333,0.174875,0.83125,0.222642,67,1234,1301
|
||||
387,1/22/2012,1,1,1,0,2,0.1625,0.16225,0.79625,0.199638,196,1781,1977
|
||||
388,1/23/2012,1,1,1,1,2,0.218333,0.243058,0.91125,0.110708,145,2287,2432
|
||||
389,1/24/2012,1,1,1,2,1,0.3425,0.349108,0.835833,0.123767,439,3900,4339
|
||||
390,1/25/2012,1,1,1,3,1,0.294167,0.294821,0.64375,0.161071,467,3803,4270
|
||||
391,1/26/2012,1,1,1,4,2,0.341667,0.35605,0.769583,0.0733958,244,3831,4075
|
||||
392,1/27/2012,1,1,1,5,2,0.425,0.415383,0.74125,0.342667,269,3187,3456
|
||||
393,1/28/2012,1,1,1,6,1,0.315833,0.326379,0.543333,0.210829,775,3248,4023
|
||||
394,1/29/2012,1,1,1,0,1,0.2825,0.272721,0.31125,0.24005,558,2685,3243
|
||||
395,1/30/2012,1,1,1,1,1,0.269167,0.262625,0.400833,0.215792,126,3498,3624
|
||||
396,1/31/2012,1,1,1,2,1,0.39,0.381317,0.416667,0.261817,324,4185,4509
|
||||
397,2/1/2012,1,1,2,3,1,0.469167,0.466538,0.507917,0.189067,304,4275,4579
|
||||
398,2/2/2012,1,1,2,4,2,0.399167,0.398971,0.672917,0.187187,190,3571,3761
|
||||
399,2/3/2012,1,1,2,5,1,0.313333,0.309346,0.526667,0.178496,310,3841,4151
|
||||
400,2/4/2012,1,1,2,6,2,0.264167,0.272725,0.779583,0.121896,384,2448,2832
|
||||
401,2/5/2012,1,1,2,0,2,0.265833,0.264521,0.687917,0.175996,318,2629,2947
|
||||
402,2/6/2012,1,1,2,1,1,0.282609,0.296426,0.622174,0.1538,206,3578,3784
|
||||
403,2/7/2012,1,1,2,2,1,0.354167,0.361104,0.49625,0.147379,199,4176,4375
|
||||
404,2/8/2012,1,1,2,3,2,0.256667,0.266421,0.722917,0.133721,109,2693,2802
|
||||
405,2/9/2012,1,1,2,4,1,0.265,0.261988,0.562083,0.194037,163,3667,3830
|
||||
406,2/10/2012,1,1,2,5,2,0.280833,0.293558,0.54,0.116929,227,3604,3831
|
||||
407,2/11/2012,1,1,2,6,3,0.224167,0.210867,0.73125,0.289796,192,1977,2169
|
||||
408,2/12/2012,1,1,2,0,1,0.1275,0.101658,0.464583,0.409212,73,1456,1529
|
||||
409,2/13/2012,1,1,2,1,1,0.2225,0.227913,0.41125,0.167283,94,3328,3422
|
||||
410,2/14/2012,1,1,2,2,2,0.319167,0.333946,0.50875,0.141179,135,3787,3922
|
||||
411,2/15/2012,1,1,2,3,1,0.348333,0.351629,0.53125,0.1816,141,4028,4169
|
||||
412,2/16/2012,1,1,2,4,2,0.316667,0.330162,0.752917,0.091425,74,2931,3005
|
||||
413,2/17/2012,1,1,2,5,1,0.343333,0.351629,0.634583,0.205846,349,3805,4154
|
||||
414,2/18/2012,1,1,2,6,1,0.346667,0.355425,0.534583,0.190929,1435,2883,4318
|
||||
415,2/19/2012,1,1,2,0,2,0.28,0.265788,0.515833,0.253112,618,2071,2689
|
||||
416,2/20/2012,1,1,2,1,1,0.28,0.273391,0.507826,0.229083,502,2627,3129
|
||||
417,2/21/2012,1,1,2,2,1,0.287826,0.295113,0.594348,0.205717,163,3614,3777
|
||||
418,2/22/2012,1,1,2,3,1,0.395833,0.392667,0.567917,0.234471,394,4379,4773
|
||||
419,2/23/2012,1,1,2,4,1,0.454167,0.444446,0.554583,0.190913,516,4546,5062
|
||||
420,2/24/2012,1,1,2,5,2,0.4075,0.410971,0.7375,0.237567,246,3241,3487
|
||||
421,2/25/2012,1,1,2,6,1,0.290833,0.255675,0.395833,0.421642,317,2415,2732
|
||||
422,2/26/2012,1,1,2,0,1,0.279167,0.268308,0.41,0.205229,515,2874,3389
|
||||
423,2/27/2012,1,1,2,1,1,0.366667,0.357954,0.490833,0.268033,253,4069,4322
|
||||
424,2/28/2012,1,1,2,2,1,0.359167,0.353525,0.395833,0.193417,229,4134,4363
|
||||
425,2/29/2012,1,1,2,3,2,0.344348,0.34847,0.804783,0.179117,65,1769,1834
|
||||
426,3/1/2012,1,1,3,4,1,0.485833,0.475371,0.615417,0.226987,325,4665,4990
|
||||
427,3/2/2012,1,1,3,5,2,0.353333,0.359842,0.657083,0.144904,246,2948,3194
|
||||
428,3/3/2012,1,1,3,6,2,0.414167,0.413492,0.62125,0.161079,956,3110,4066
|
||||
429,3/4/2012,1,1,3,0,1,0.325833,0.303021,0.403333,0.334571,710,2713,3423
|
||||
430,3/5/2012,1,1,3,1,1,0.243333,0.241171,0.50625,0.228858,203,3130,3333
|
||||
431,3/6/2012,1,1,3,2,1,0.258333,0.255042,0.456667,0.200875,221,3735,3956
|
||||
432,3/7/2012,1,1,3,3,1,0.404167,0.3851,0.513333,0.345779,432,4484,4916
|
||||
433,3/8/2012,1,1,3,4,1,0.5275,0.524604,0.5675,0.441563,486,4896,5382
|
||||
434,3/9/2012,1,1,3,5,2,0.410833,0.397083,0.407083,0.4148,447,4122,4569
|
||||
435,3/10/2012,1,1,3,6,1,0.2875,0.277767,0.350417,0.22575,968,3150,4118
|
||||
436,3/11/2012,1,1,3,0,1,0.361739,0.35967,0.476957,0.222587,1658,3253,4911
|
||||
437,3/12/2012,1,1,3,1,1,0.466667,0.459592,0.489167,0.207713,838,4460,5298
|
||||
438,3/13/2012,1,1,3,2,1,0.565,0.542929,0.6175,0.23695,762,5085,5847
|
||||
439,3/14/2012,1,1,3,3,1,0.5725,0.548617,0.507083,0.115062,997,5315,6312
|
||||
440,3/15/2012,1,1,3,4,1,0.5575,0.532825,0.579583,0.149883,1005,5187,6192
|
||||
441,3/16/2012,1,1,3,5,2,0.435833,0.436229,0.842083,0.113192,548,3830,4378
|
||||
442,3/17/2012,1,1,3,6,2,0.514167,0.505046,0.755833,0.110704,3155,4681,7836
|
||||
443,3/18/2012,1,1,3,0,2,0.4725,0.464,0.81,0.126883,2207,3685,5892
|
||||
444,3/19/2012,1,1,3,1,1,0.545,0.532821,0.72875,0.162317,982,5171,6153
|
||||
445,3/20/2012,1,1,3,2,1,0.560833,0.538533,0.807917,0.121271,1051,5042,6093
|
||||
446,3/21/2012,2,1,3,3,2,0.531667,0.513258,0.82125,0.0895583,1122,5108,6230
|
||||
447,3/22/2012,2,1,3,4,1,0.554167,0.531567,0.83125,0.117562,1334,5537,6871
|
||||
448,3/23/2012,2,1,3,5,2,0.601667,0.570067,0.694167,0.1163,2469,5893,8362
|
||||
449,3/24/2012,2,1,3,6,2,0.5025,0.486733,0.885417,0.192783,1033,2339,3372
|
||||
450,3/25/2012,2,1,3,0,2,0.4375,0.437488,0.880833,0.220775,1532,3464,4996
|
||||
451,3/26/2012,2,1,3,1,1,0.445833,0.43875,0.477917,0.386821,795,4763,5558
|
||||
452,3/27/2012,2,1,3,2,1,0.323333,0.315654,0.29,0.187192,531,4571,5102
|
||||
453,3/28/2012,2,1,3,3,1,0.484167,0.47095,0.48125,0.291671,674,5024,5698
|
||||
454,3/29/2012,2,1,3,4,1,0.494167,0.482304,0.439167,0.31965,834,5299,6133
|
||||
455,3/30/2012,2,1,3,5,2,0.37,0.375621,0.580833,0.138067,796,4663,5459
|
||||
456,3/31/2012,2,1,3,6,2,0.424167,0.421708,0.738333,0.250617,2301,3934,6235
|
||||
457,4/1/2012,2,1,4,0,2,0.425833,0.417287,0.67625,0.172267,2347,3694,6041
|
||||
458,4/2/2012,2,1,4,1,1,0.433913,0.427513,0.504348,0.312139,1208,4728,5936
|
||||
459,4/3/2012,2,1,4,2,1,0.466667,0.461483,0.396667,0.100133,1348,5424,6772
|
||||
460,4/4/2012,2,1,4,3,1,0.541667,0.53345,0.469583,0.180975,1058,5378,6436
|
||||
461,4/5/2012,2,1,4,4,1,0.435,0.431163,0.374167,0.219529,1192,5265,6457
|
||||
462,4/6/2012,2,1,4,5,1,0.403333,0.390767,0.377083,0.300388,1807,4653,6460
|
||||
463,4/7/2012,2,1,4,6,1,0.4375,0.426129,0.254167,0.274871,3252,3605,6857
|
||||
464,4/8/2012,2,1,4,0,1,0.5,0.492425,0.275833,0.232596,2230,2939,5169
|
||||
465,4/9/2012,2,1,4,1,1,0.489167,0.476638,0.3175,0.358196,905,4680,5585
|
||||
466,4/10/2012,2,1,4,2,1,0.446667,0.436233,0.435,0.249375,819,5099,5918
|
||||
467,4/11/2012,2,1,4,3,1,0.348696,0.337274,0.469565,0.295274,482,4380,4862
|
||||
468,4/12/2012,2,1,4,4,1,0.3975,0.387604,0.46625,0.290429,663,4746,5409
|
||||
469,4/13/2012,2,1,4,5,1,0.4425,0.431808,0.408333,0.155471,1252,5146,6398
|
||||
470,4/14/2012,2,1,4,6,1,0.495,0.487996,0.502917,0.190917,2795,4665,7460
|
||||
471,4/15/2012,2,1,4,0,1,0.606667,0.573875,0.507917,0.225129,2846,4286,7132
|
||||
472,4/16/2012,2,1,4,1,1,0.664167,0.614925,0.561667,0.284829,1198,5172,6370
|
||||
473,4/17/2012,2,1,4,2,1,0.608333,0.598487,0.390417,0.273629,989,5702,6691
|
||||
474,4/18/2012,2,1,4,3,2,0.463333,0.457038,0.569167,0.167912,347,4020,4367
|
||||
475,4/19/2012,2,1,4,4,1,0.498333,0.493046,0.6125,0.0659292,846,5719,6565
|
||||
476,4/20/2012,2,1,4,5,1,0.526667,0.515775,0.694583,0.149871,1340,5950,7290
|
||||
477,4/21/2012,2,1,4,6,1,0.57,0.542921,0.682917,0.283587,2541,4083,6624
|
||||
478,4/22/2012,2,1,4,0,3,0.396667,0.389504,0.835417,0.344546,120,907,1027
|
||||
479,4/23/2012,2,1,4,1,2,0.321667,0.301125,0.766667,0.303496,195,3019,3214
|
||||
480,4/24/2012,2,1,4,2,1,0.413333,0.405283,0.454167,0.249383,518,5115,5633
|
||||
481,4/25/2012,2,1,4,3,1,0.476667,0.470317,0.427917,0.118792,655,5541,6196
|
||||
482,4/26/2012,2,1,4,4,2,0.498333,0.483583,0.756667,0.176625,475,4551,5026
|
||||
483,4/27/2012,2,1,4,5,1,0.4575,0.452637,0.400833,0.347633,1014,5219,6233
|
||||
484,4/28/2012,2,1,4,6,2,0.376667,0.377504,0.489583,0.129975,1120,3100,4220
|
||||
485,4/29/2012,2,1,4,0,1,0.458333,0.450121,0.587083,0.116908,2229,4075,6304
|
||||
486,4/30/2012,2,1,4,1,2,0.464167,0.457696,0.57,0.171638,665,4907,5572
|
||||
487,5/1/2012,2,1,5,2,2,0.613333,0.577021,0.659583,0.156096,653,5087,5740
|
||||
488,5/2/2012,2,1,5,3,1,0.564167,0.537896,0.797083,0.138058,667,5502,6169
|
||||
489,5/3/2012,2,1,5,4,2,0.56,0.537242,0.768333,0.133696,764,5657,6421
|
||||
490,5/4/2012,2,1,5,5,1,0.6275,0.590917,0.735417,0.162938,1069,5227,6296
|
||||
491,5/5/2012,2,1,5,6,2,0.621667,0.584608,0.756667,0.152992,2496,4387,6883
|
||||
492,5/6/2012,2,1,5,0,2,0.5625,0.546737,0.74,0.149879,2135,4224,6359
|
||||
493,5/7/2012,2,1,5,1,2,0.5375,0.527142,0.664167,0.230721,1008,5265,6273
|
||||
494,5/8/2012,2,1,5,2,2,0.581667,0.557471,0.685833,0.296029,738,4990,5728
|
||||
495,5/9/2012,2,1,5,3,2,0.575,0.553025,0.744167,0.216412,620,4097,4717
|
||||
496,5/10/2012,2,1,5,4,1,0.505833,0.491783,0.552083,0.314063,1026,5546,6572
|
||||
497,5/11/2012,2,1,5,5,1,0.533333,0.520833,0.360417,0.236937,1319,5711,7030
|
||||
498,5/12/2012,2,1,5,6,1,0.564167,0.544817,0.480417,0.123133,2622,4807,7429
|
||||
499,5/13/2012,2,1,5,0,1,0.6125,0.585238,0.57625,0.225117,2172,3946,6118
|
||||
500,5/14/2012,2,1,5,1,2,0.573333,0.5499,0.789583,0.212692,342,2501,2843
|
||||
501,5/15/2012,2,1,5,2,2,0.611667,0.576404,0.794583,0.147392,625,4490,5115
|
||||
502,5/16/2012,2,1,5,3,1,0.636667,0.595975,0.697917,0.122512,991,6433,7424
|
||||
503,5/17/2012,2,1,5,4,1,0.593333,0.572613,0.52,0.229475,1242,6142,7384
|
||||
504,5/18/2012,2,1,5,5,1,0.564167,0.551121,0.523333,0.136817,1521,6118,7639
|
||||
505,5/19/2012,2,1,5,6,1,0.6,0.566908,0.45625,0.083975,3410,4884,8294
|
||||
506,5/20/2012,2,1,5,0,1,0.620833,0.583967,0.530417,0.254367,2704,4425,7129
|
||||
507,5/21/2012,2,1,5,1,2,0.598333,0.565667,0.81125,0.233204,630,3729,4359
|
||||
508,5/22/2012,2,1,5,2,2,0.615,0.580825,0.765833,0.118167,819,5254,6073
|
||||
509,5/23/2012,2,1,5,3,2,0.621667,0.584612,0.774583,0.102,766,4494,5260
|
||||
510,5/24/2012,2,1,5,4,1,0.655,0.6067,0.716667,0.172896,1059,5711,6770
|
||||
511,5/25/2012,2,1,5,5,1,0.68,0.627529,0.747083,0.14055,1417,5317,6734
|
||||
512,5/26/2012,2,1,5,6,1,0.6925,0.642696,0.7325,0.198992,2855,3681,6536
|
||||
513,5/27/2012,2,1,5,0,1,0.69,0.641425,0.697083,0.215171,3283,3308,6591
|
||||
514,5/28/2012,2,1,5,1,1,0.7125,0.6793,0.67625,0.196521,2557,3486,6043
|
||||
515,5/29/2012,2,1,5,2,1,0.7225,0.672992,0.684583,0.2954,880,4863,5743
|
||||
516,5/30/2012,2,1,5,3,2,0.656667,0.611129,0.67,0.134329,745,6110,6855
|
||||
517,5/31/2012,2,1,5,4,1,0.68,0.631329,0.492917,0.195279,1100,6238,7338
|
||||
518,6/1/2012,2,1,6,5,2,0.654167,0.607962,0.755417,0.237563,533,3594,4127
|
||||
519,6/2/2012,2,1,6,6,1,0.583333,0.566288,0.549167,0.186562,2795,5325,8120
|
||||
520,6/3/2012,2,1,6,0,1,0.6025,0.575133,0.493333,0.184087,2494,5147,7641
|
||||
521,6/4/2012,2,1,6,1,1,0.5975,0.578283,0.487083,0.284833,1071,5927,6998
|
||||
522,6/5/2012,2,1,6,2,2,0.540833,0.525892,0.613333,0.209575,968,6033,7001
|
||||
523,6/6/2012,2,1,6,3,1,0.554167,0.542292,0.61125,0.077125,1027,6028,7055
|
||||
524,6/7/2012,2,1,6,4,1,0.6025,0.569442,0.567083,0.15735,1038,6456,7494
|
||||
525,6/8/2012,2,1,6,5,1,0.649167,0.597862,0.467917,0.175383,1488,6248,7736
|
||||
526,6/9/2012,2,1,6,6,1,0.710833,0.648367,0.437083,0.144287,2708,4790,7498
|
||||
527,6/10/2012,2,1,6,0,1,0.726667,0.663517,0.538333,0.133721,2224,4374,6598
|
||||
528,6/11/2012,2,1,6,1,2,0.720833,0.659721,0.587917,0.207713,1017,5647,6664
|
||||
529,6/12/2012,2,1,6,2,2,0.653333,0.597875,0.833333,0.214546,477,4495,4972
|
||||
530,6/13/2012,2,1,6,3,1,0.655833,0.611117,0.582083,0.343279,1173,6248,7421
|
||||
531,6/14/2012,2,1,6,4,1,0.648333,0.624383,0.569583,0.253733,1180,6183,7363
|
||||
532,6/15/2012,2,1,6,5,1,0.639167,0.599754,0.589583,0.176617,1563,6102,7665
|
||||
533,6/16/2012,2,1,6,6,1,0.631667,0.594708,0.504167,0.166667,2963,4739,7702
|
||||
534,6/17/2012,2,1,6,0,1,0.5925,0.571975,0.59875,0.144904,2634,4344,6978
|
||||
535,6/18/2012,2,1,6,1,2,0.568333,0.544842,0.777917,0.174746,653,4446,5099
|
||||
536,6/19/2012,2,1,6,2,1,0.688333,0.654692,0.69,0.148017,968,5857,6825
|
||||
537,6/20/2012,2,1,6,3,1,0.7825,0.720975,0.592083,0.113812,872,5339,6211
|
||||
538,6/21/2012,3,1,6,4,1,0.805833,0.752542,0.567917,0.118787,778,5127,5905
|
||||
539,6/22/2012,3,1,6,5,1,0.7775,0.724121,0.57375,0.182842,964,4859,5823
|
||||
540,6/23/2012,3,1,6,6,1,0.731667,0.652792,0.534583,0.179721,2657,4801,7458
|
||||
541,6/24/2012,3,1,6,0,1,0.743333,0.674254,0.479167,0.145525,2551,4340,6891
|
||||
542,6/25/2012,3,1,6,1,1,0.715833,0.654042,0.504167,0.300383,1139,5640,6779
|
||||
543,6/26/2012,3,1,6,2,1,0.630833,0.594704,0.373333,0.347642,1077,6365,7442
|
||||
544,6/27/2012,3,1,6,3,1,0.6975,0.640792,0.36,0.271775,1077,6258,7335
|
||||
545,6/28/2012,3,1,6,4,1,0.749167,0.675512,0.4225,0.17165,921,5958,6879
|
||||
546,6/29/2012,3,1,6,5,1,0.834167,0.786613,0.48875,0.165417,829,4634,5463
|
||||
547,6/30/2012,3,1,6,6,1,0.765,0.687508,0.60125,0.161071,1455,4232,5687
|
||||
548,7/1/2012,3,1,7,0,1,0.815833,0.750629,0.51875,0.168529,1421,4110,5531
|
||||
549,7/2/2012,3,1,7,1,1,0.781667,0.702038,0.447083,0.195267,904,5323,6227
|
||||
550,7/3/2012,3,1,7,2,1,0.780833,0.70265,0.492083,0.126237,1052,5608,6660
|
||||
551,7/4/2012,3,1,7,3,1,0.789167,0.732337,0.53875,0.13495,2562,4841,7403
|
||||
552,7/5/2012,3,1,7,4,1,0.8275,0.761367,0.457917,0.194029,1405,4836,6241
|
||||
553,7/6/2012,3,1,7,5,1,0.828333,0.752533,0.450833,0.146142,1366,4841,6207
|
||||
554,7/7/2012,3,1,7,6,1,0.861667,0.804913,0.492083,0.163554,1448,3392,4840
|
||||
555,7/8/2012,3,1,7,0,1,0.8225,0.790396,0.57375,0.125629,1203,3469,4672
|
||||
556,7/9/2012,3,1,7,1,2,0.710833,0.654054,0.683333,0.180975,998,5571,6569
|
||||
557,7/10/2012,3,1,7,2,2,0.720833,0.664796,0.6675,0.151737,954,5336,6290
|
||||
558,7/11/2012,3,1,7,3,1,0.716667,0.650271,0.633333,0.151733,975,6289,7264
|
||||
559,7/12/2012,3,1,7,4,1,0.715833,0.654683,0.529583,0.146775,1032,6414,7446
|
||||
560,7/13/2012,3,1,7,5,2,0.731667,0.667933,0.485833,0.08085,1511,5988,7499
|
||||
561,7/14/2012,3,1,7,6,2,0.703333,0.666042,0.699167,0.143679,2355,4614,6969
|
||||
562,7/15/2012,3,1,7,0,1,0.745833,0.705196,0.717917,0.166667,1920,4111,6031
|
||||
563,7/16/2012,3,1,7,1,1,0.763333,0.724125,0.645,0.164187,1088,5742,6830
|
||||
564,7/17/2012,3,1,7,2,1,0.818333,0.755683,0.505833,0.114429,921,5865,6786
|
||||
565,7/18/2012,3,1,7,3,1,0.793333,0.745583,0.577083,0.137442,799,4914,5713
|
||||
566,7/19/2012,3,1,7,4,1,0.77,0.714642,0.600417,0.165429,888,5703,6591
|
||||
567,7/20/2012,3,1,7,5,2,0.665833,0.613025,0.844167,0.208967,747,5123,5870
|
||||
568,7/21/2012,3,1,7,6,3,0.595833,0.549912,0.865417,0.2133,1264,3195,4459
|
||||
569,7/22/2012,3,1,7,0,2,0.6675,0.623125,0.7625,0.0939208,2544,4866,7410
|
||||
570,7/23/2012,3,1,7,1,1,0.741667,0.690017,0.694167,0.138683,1135,5831,6966
|
||||
571,7/24/2012,3,1,7,2,1,0.750833,0.70645,0.655,0.211454,1140,6452,7592
|
||||
572,7/25/2012,3,1,7,3,1,0.724167,0.654054,0.45,0.1648,1383,6790,8173
|
||||
573,7/26/2012,3,1,7,4,1,0.776667,0.739263,0.596667,0.284813,1036,5825,6861
|
||||
574,7/27/2012,3,1,7,5,1,0.781667,0.734217,0.594583,0.152992,1259,5645,6904
|
||||
575,7/28/2012,3,1,7,6,1,0.755833,0.697604,0.613333,0.15735,2234,4451,6685
|
||||
576,7/29/2012,3,1,7,0,1,0.721667,0.667933,0.62375,0.170396,2153,4444,6597
|
||||
577,7/30/2012,3,1,7,1,1,0.730833,0.684987,0.66875,0.153617,1040,6065,7105
|
||||
578,7/31/2012,3,1,7,2,1,0.713333,0.662896,0.704167,0.165425,968,6248,7216
|
||||
579,8/1/2012,3,1,8,3,1,0.7175,0.667308,0.6775,0.141179,1074,6506,7580
|
||||
580,8/2/2012,3,1,8,4,1,0.7525,0.707088,0.659583,0.129354,983,6278,7261
|
||||
581,8/3/2012,3,1,8,5,2,0.765833,0.722867,0.6425,0.215792,1328,5847,7175
|
||||
582,8/4/2012,3,1,8,6,1,0.793333,0.751267,0.613333,0.257458,2345,4479,6824
|
||||
583,8/5/2012,3,1,8,0,1,0.769167,0.731079,0.6525,0.290421,1707,3757,5464
|
||||
584,8/6/2012,3,1,8,1,2,0.7525,0.710246,0.654167,0.129354,1233,5780,7013
|
||||
585,8/7/2012,3,1,8,2,2,0.735833,0.697621,0.70375,0.116908,1278,5995,7273
|
||||
586,8/8/2012,3,1,8,3,2,0.75,0.707717,0.672917,0.1107,1263,6271,7534
|
||||
587,8/9/2012,3,1,8,4,1,0.755833,0.699508,0.620417,0.1561,1196,6090,7286
|
||||
588,8/10/2012,3,1,8,5,2,0.715833,0.667942,0.715833,0.238813,1065,4721,5786
|
||||
589,8/11/2012,3,1,8,6,2,0.6925,0.638267,0.732917,0.206479,2247,4052,6299
|
||||
590,8/12/2012,3,1,8,0,1,0.700833,0.644579,0.530417,0.122512,2182,4362,6544
|
||||
591,8/13/2012,3,1,8,1,1,0.720833,0.662254,0.545417,0.136212,1207,5676,6883
|
||||
592,8/14/2012,3,1,8,2,1,0.726667,0.676779,0.686667,0.169158,1128,5656,6784
|
||||
593,8/15/2012,3,1,8,3,1,0.706667,0.654037,0.619583,0.169771,1198,6149,7347
|
||||
594,8/16/2012,3,1,8,4,1,0.719167,0.654688,0.519167,0.141796,1338,6267,7605
|
||||
595,8/17/2012,3,1,8,5,1,0.723333,0.2424,0.570833,0.231354,1483,5665,7148
|
||||
596,8/18/2012,3,1,8,6,1,0.678333,0.618071,0.603333,0.177867,2827,5038,7865
|
||||
597,8/19/2012,3,1,8,0,2,0.635833,0.603554,0.711667,0.08645,1208,3341,4549
|
||||
598,8/20/2012,3,1,8,1,2,0.635833,0.595967,0.734167,0.129979,1026,5504,6530
|
||||
599,8/21/2012,3,1,8,2,1,0.649167,0.601025,0.67375,0.0727708,1081,5925,7006
|
||||
600,8/22/2012,3,1,8,3,1,0.6675,0.621854,0.677083,0.0702833,1094,6281,7375
|
||||
601,8/23/2012,3,1,8,4,1,0.695833,0.637008,0.635833,0.0845958,1363,6402,7765
|
||||
602,8/24/2012,3,1,8,5,2,0.7025,0.6471,0.615,0.0721458,1325,6257,7582
|
||||
603,8/25/2012,3,1,8,6,2,0.661667,0.618696,0.712917,0.244408,1829,4224,6053
|
||||
604,8/26/2012,3,1,8,0,2,0.653333,0.595996,0.845833,0.228858,1483,3772,5255
|
||||
605,8/27/2012,3,1,8,1,1,0.703333,0.654688,0.730417,0.128733,989,5928,6917
|
||||
606,8/28/2012,3,1,8,2,1,0.728333,0.66605,0.62,0.190925,935,6105,7040
|
||||
607,8/29/2012,3,1,8,3,1,0.685,0.635733,0.552083,0.112562,1177,6520,7697
|
||||
608,8/30/2012,3,1,8,4,1,0.706667,0.652779,0.590417,0.0771167,1172,6541,7713
|
||||
609,8/31/2012,3,1,8,5,1,0.764167,0.6894,0.5875,0.168533,1433,5917,7350
|
||||
610,9/1/2012,3,1,9,6,2,0.753333,0.702654,0.638333,0.113187,2352,3788,6140
|
||||
611,9/2/2012,3,1,9,0,2,0.696667,0.649,0.815,0.0640708,2613,3197,5810
|
||||
612,9/3/2012,3,1,9,1,1,0.7075,0.661629,0.790833,0.151121,1965,4069,6034
|
||||
613,9/4/2012,3,1,9,2,1,0.725833,0.686888,0.755,0.236321,867,5997,6864
|
||||
614,9/5/2012,3,1,9,3,1,0.736667,0.708983,0.74125,0.187808,832,6280,7112
|
||||
615,9/6/2012,3,1,9,4,2,0.696667,0.655329,0.810417,0.142421,611,5592,6203
|
||||
616,9/7/2012,3,1,9,5,1,0.703333,0.657204,0.73625,0.171646,1045,6459,7504
|
||||
617,9/8/2012,3,1,9,6,2,0.659167,0.611121,0.799167,0.281104,1557,4419,5976
|
||||
618,9/9/2012,3,1,9,0,1,0.61,0.578925,0.5475,0.224496,2570,5657,8227
|
||||
619,9/10/2012,3,1,9,1,1,0.583333,0.565654,0.50375,0.258713,1118,6407,7525
|
||||
620,9/11/2012,3,1,9,2,1,0.5775,0.554292,0.52,0.0920542,1070,6697,7767
|
||||
621,9/12/2012,3,1,9,3,1,0.599167,0.570075,0.577083,0.131846,1050,6820,7870
|
||||
622,9/13/2012,3,1,9,4,1,0.6125,0.579558,0.637083,0.0827208,1054,6750,7804
|
||||
623,9/14/2012,3,1,9,5,1,0.633333,0.594083,0.6725,0.103863,1379,6630,8009
|
||||
624,9/15/2012,3,1,9,6,1,0.608333,0.585867,0.501667,0.247521,3160,5554,8714
|
||||
625,9/16/2012,3,1,9,0,1,0.58,0.563125,0.57,0.0901833,2166,5167,7333
|
||||
626,9/17/2012,3,1,9,1,2,0.580833,0.55305,0.734583,0.151742,1022,5847,6869
|
||||
627,9/18/2012,3,1,9,2,2,0.623333,0.565067,0.8725,0.357587,371,3702,4073
|
||||
628,9/19/2012,3,1,9,3,1,0.5525,0.540404,0.536667,0.215175,788,6803,7591
|
||||
629,9/20/2012,3,1,9,4,1,0.546667,0.532192,0.618333,0.118167,939,6781,7720
|
||||
630,9/21/2012,3,1,9,5,1,0.599167,0.571971,0.66875,0.154229,1250,6917,8167
|
||||
631,9/22/2012,3,1,9,6,1,0.65,0.610488,0.646667,0.283583,2512,5883,8395
|
||||
632,9/23/2012,4,1,9,0,1,0.529167,0.518933,0.467083,0.223258,2454,5453,7907
|
||||
633,9/24/2012,4,1,9,1,1,0.514167,0.502513,0.492917,0.142404,1001,6435,7436
|
||||
634,9/25/2012,4,1,9,2,1,0.55,0.544179,0.57,0.236321,845,6693,7538
|
||||
635,9/26/2012,4,1,9,3,1,0.635,0.596613,0.630833,0.2444,787,6946,7733
|
||||
636,9/27/2012,4,1,9,4,2,0.65,0.607975,0.690833,0.134342,751,6642,7393
|
||||
637,9/28/2012,4,1,9,5,2,0.619167,0.585863,0.69,0.164179,1045,6370,7415
|
||||
638,9/29/2012,4,1,9,6,1,0.5425,0.530296,0.542917,0.227604,2589,5966,8555
|
||||
639,9/30/2012,4,1,9,0,1,0.526667,0.517663,0.583333,0.134958,2015,4874,6889
|
||||
640,10/1/2012,4,1,10,1,2,0.520833,0.512,0.649167,0.0908042,763,6015,6778
|
||||
641,10/2/2012,4,1,10,2,3,0.590833,0.542333,0.871667,0.104475,315,4324,4639
|
||||
642,10/3/2012,4,1,10,3,2,0.6575,0.599133,0.79375,0.0665458,728,6844,7572
|
||||
643,10/4/2012,4,1,10,4,2,0.6575,0.607975,0.722917,0.117546,891,6437,7328
|
||||
644,10/5/2012,4,1,10,5,1,0.615,0.580187,0.6275,0.10635,1516,6640,8156
|
||||
645,10/6/2012,4,1,10,6,1,0.554167,0.538521,0.664167,0.268025,3031,4934,7965
|
||||
646,10/7/2012,4,1,10,0,2,0.415833,0.419813,0.708333,0.141162,781,2729,3510
|
||||
647,10/8/2012,4,1,10,1,2,0.383333,0.387608,0.709583,0.189679,874,4604,5478
|
||||
648,10/9/2012,4,1,10,2,2,0.446667,0.438112,0.761667,0.1903,601,5791,6392
|
||||
649,10/10/2012,4,1,10,3,1,0.514167,0.503142,0.630833,0.187821,780,6911,7691
|
||||
650,10/11/2012,4,1,10,4,1,0.435,0.431167,0.463333,0.181596,834,6736,7570
|
||||
651,10/12/2012,4,1,10,5,1,0.4375,0.433071,0.539167,0.235092,1060,6222,7282
|
||||
652,10/13/2012,4,1,10,6,1,0.393333,0.391396,0.494583,0.146142,2252,4857,7109
|
||||
653,10/14/2012,4,1,10,0,1,0.521667,0.508204,0.640417,0.278612,2080,4559,6639
|
||||
654,10/15/2012,4,1,10,1,2,0.561667,0.53915,0.7075,0.296037,760,5115,5875
|
||||
655,10/16/2012,4,1,10,2,1,0.468333,0.460846,0.558333,0.182221,922,6612,7534
|
||||
656,10/17/2012,4,1,10,3,1,0.455833,0.450108,0.692917,0.101371,979,6482,7461
|
||||
657,10/18/2012,4,1,10,4,2,0.5225,0.512625,0.728333,0.236937,1008,6501,7509
|
||||
658,10/19/2012,4,1,10,5,2,0.563333,0.537896,0.815,0.134954,753,4671,5424
|
||||
659,10/20/2012,4,1,10,6,1,0.484167,0.472842,0.572917,0.117537,2806,5284,8090
|
||||
660,10/21/2012,4,1,10,0,1,0.464167,0.456429,0.51,0.166054,2132,4692,6824
|
||||
661,10/22/2012,4,1,10,1,1,0.4875,0.482942,0.568333,0.0814833,830,6228,7058
|
||||
662,10/23/2012,4,1,10,2,1,0.544167,0.530304,0.641667,0.0945458,841,6625,7466
|
||||
663,10/24/2012,4,1,10,3,1,0.5875,0.558721,0.63625,0.0727792,795,6898,7693
|
||||
664,10/25/2012,4,1,10,4,2,0.55,0.529688,0.800417,0.124375,875,6484,7359
|
||||
665,10/26/2012,4,1,10,5,2,0.545833,0.52275,0.807083,0.132467,1182,6262,7444
|
||||
666,10/27/2012,4,1,10,6,2,0.53,0.515133,0.72,0.235692,2643,5209,7852
|
||||
667,10/28/2012,4,1,10,0,2,0.4775,0.467771,0.694583,0.398008,998,3461,4459
|
||||
668,10/29/2012,4,1,10,1,3,0.44,0.4394,0.88,0.3582,2,20,22
|
||||
669,10/30/2012,4,1,10,2,2,0.318182,0.309909,0.825455,0.213009,87,1009,1096
|
||||
670,10/31/2012,4,1,10,3,2,0.3575,0.3611,0.666667,0.166667,419,5147,5566
|
||||
671,11/1/2012,4,1,11,4,2,0.365833,0.369942,0.581667,0.157346,466,5520,5986
|
||||
672,11/2/2012,4,1,11,5,1,0.355,0.356042,0.522083,0.266175,618,5229,5847
|
||||
673,11/3/2012,4,1,11,6,2,0.343333,0.323846,0.49125,0.270529,1029,4109,5138
|
||||
674,11/4/2012,4,1,11,0,1,0.325833,0.329538,0.532917,0.179108,1201,3906,5107
|
||||
675,11/5/2012,4,1,11,1,1,0.319167,0.308075,0.494167,0.236325,378,4881,5259
|
||||
676,11/6/2012,4,1,11,2,1,0.280833,0.281567,0.567083,0.173513,466,5220,5686
|
||||
677,11/7/2012,4,1,11,3,2,0.295833,0.274621,0.5475,0.304108,326,4709,5035
|
||||
678,11/8/2012,4,1,11,4,1,0.352174,0.341891,0.333478,0.347835,340,4975,5315
|
||||
679,11/9/2012,4,1,11,5,1,0.361667,0.355413,0.540833,0.214558,709,5283,5992
|
||||
680,11/10/2012,4,1,11,6,1,0.389167,0.393937,0.645417,0.0578458,2090,4446,6536
|
||||
681,11/11/2012,4,1,11,0,1,0.420833,0.421713,0.659167,0.1275,2290,4562,6852
|
||||
682,11/12/2012,4,1,11,1,1,0.485,0.475383,0.741667,0.173517,1097,5172,6269
|
||||
683,11/13/2012,4,1,11,2,2,0.343333,0.323225,0.662917,0.342046,327,3767,4094
|
||||
684,11/14/2012,4,1,11,3,1,0.289167,0.281563,0.552083,0.199625,373,5122,5495
|
||||
685,11/15/2012,4,1,11,4,2,0.321667,0.324492,0.620417,0.152987,320,5125,5445
|
||||
686,11/16/2012,4,1,11,5,1,0.345,0.347204,0.524583,0.171025,484,5214,5698
|
||||
687,11/17/2012,4,1,11,6,1,0.325,0.326383,0.545417,0.179729,1313,4316,5629
|
||||
688,11/18/2012,4,1,11,0,1,0.3425,0.337746,0.692917,0.227612,922,3747,4669
|
||||
689,11/19/2012,4,1,11,1,2,0.380833,0.375621,0.623333,0.235067,449,5050,5499
|
||||
690,11/20/2012,4,1,11,2,2,0.374167,0.380667,0.685,0.082725,534,5100,5634
|
||||
691,11/21/2012,4,1,11,3,1,0.353333,0.364892,0.61375,0.103246,615,4531,5146
|
||||
692,11/22/2012,4,1,11,4,1,0.34,0.350371,0.580417,0.0528708,955,1470,2425
|
||||
693,11/23/2012,4,1,11,5,1,0.368333,0.378779,0.56875,0.148021,1603,2307,3910
|
||||
694,11/24/2012,4,1,11,6,1,0.278333,0.248742,0.404583,0.376871,532,1745,2277
|
||||
695,11/25/2012,4,1,11,0,1,0.245833,0.257583,0.468333,0.1505,309,2115,2424
|
||||
696,11/26/2012,4,1,11,1,1,0.313333,0.339004,0.535417,0.04665,337,4750,5087
|
||||
697,11/27/2012,4,1,11,2,2,0.291667,0.281558,0.786667,0.237562,123,3836,3959
|
||||
698,11/28/2012,4,1,11,3,1,0.296667,0.289762,0.50625,0.210821,198,5062,5260
|
||||
699,11/29/2012,4,1,11,4,1,0.28087,0.298422,0.555652,0.115522,243,5080,5323
|
||||
700,11/30/2012,4,1,11,5,1,0.298333,0.323867,0.649583,0.0584708,362,5306,5668
|
||||
701,12/1/2012,4,1,12,6,2,0.298333,0.316904,0.806667,0.0597042,951,4240,5191
|
||||
702,12/2/2012,4,1,12,0,2,0.3475,0.359208,0.823333,0.124379,892,3757,4649
|
||||
703,12/3/2012,4,1,12,1,1,0.4525,0.455796,0.7675,0.0827208,555,5679,6234
|
||||
704,12/4/2012,4,1,12,2,1,0.475833,0.469054,0.73375,0.174129,551,6055,6606
|
||||
705,12/5/2012,4,1,12,3,1,0.438333,0.428012,0.485,0.324021,331,5398,5729
|
||||
706,12/6/2012,4,1,12,4,1,0.255833,0.258204,0.50875,0.174754,340,5035,5375
|
||||
707,12/7/2012,4,1,12,5,2,0.320833,0.321958,0.764167,0.1306,349,4659,5008
|
||||
708,12/8/2012,4,1,12,6,2,0.381667,0.389508,0.91125,0.101379,1153,4429,5582
|
||||
709,12/9/2012,4,1,12,0,2,0.384167,0.390146,0.905417,0.157975,441,2787,3228
|
||||
710,12/10/2012,4,1,12,1,2,0.435833,0.435575,0.925,0.190308,329,4841,5170
|
||||
711,12/11/2012,4,1,12,2,2,0.353333,0.338363,0.596667,0.296037,282,5219,5501
|
||||
712,12/12/2012,4,1,12,3,2,0.2975,0.297338,0.538333,0.162937,310,5009,5319
|
||||
713,12/13/2012,4,1,12,4,1,0.295833,0.294188,0.485833,0.174129,425,5107,5532
|
||||
714,12/14/2012,4,1,12,5,1,0.281667,0.294192,0.642917,0.131229,429,5182,5611
|
||||
715,12/15/2012,4,1,12,6,1,0.324167,0.338383,0.650417,0.10635,767,4280,5047
|
||||
716,12/16/2012,4,1,12,0,2,0.3625,0.369938,0.83875,0.100742,538,3248,3786
|
||||
717,12/17/2012,4,1,12,1,2,0.393333,0.4015,0.907083,0.0982583,212,4373,4585
|
||||
718,12/18/2012,4,1,12,2,1,0.410833,0.409708,0.66625,0.221404,433,5124,5557
|
||||
719,12/19/2012,4,1,12,3,1,0.3325,0.342162,0.625417,0.184092,333,4934,5267
|
||||
720,12/20/2012,4,1,12,4,2,0.33,0.335217,0.667917,0.132463,314,3814,4128
|
||||
721,12/21/2012,1,1,12,5,2,0.326667,0.301767,0.556667,0.374383,221,3402,3623
|
||||
722,12/22/2012,1,1,12,6,1,0.265833,0.236113,0.44125,0.407346,205,1544,1749
|
||||
723,12/23/2012,1,1,12,0,1,0.245833,0.259471,0.515417,0.133083,408,1379,1787
|
||||
724,12/24/2012,1,1,12,1,2,0.231304,0.2589,0.791304,0.0772304,174,746,920
|
||||
725,12/25/2012,1,1,12,2,2,0.291304,0.294465,0.734783,0.168726,440,573,1013
|
||||
726,12/26/2012,1,1,12,3,3,0.243333,0.220333,0.823333,0.316546,9,432,441
|
||||
727,12/27/2012,1,1,12,4,2,0.254167,0.226642,0.652917,0.350133,247,1867,2114
|
||||
728,12/28/2012,1,1,12,5,2,0.253333,0.255046,0.59,0.155471,644,2451,3095
|
||||
729,12/29/2012,1,1,12,6,2,0.253333,0.2424,0.752917,0.124383,159,1182,1341
|
||||
730,12/30/2012,1,1,12,0,1,0.255833,0.2317,0.483333,0.350754,364,1432,1796
|
||||
731,12/31/2012,1,1,12,1,2,0.215833,0.223487,0.5775,0.154846,439,2290,2729
|
||||
|
@@ -123,12 +123,22 @@
|
||||
"data.head()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# let's take note of what columns means what in the data\n",
|
||||
"time_column_name = 'timeStamp'\n",
|
||||
"target_column_name = 'demand'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Get the train data\n",
|
||||
"\n"
|
||||
"### Split the data into train and test sets\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -137,46 +147,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"train = data[data['timeStamp'] < '2017-02-01']\n",
|
||||
"test = data[data['timeStamp'] >= '2017-02-01']\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prepare the test data, we will feed X_test to the fitted model and get prediction"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_test = test.pop('demand').values\n",
|
||||
"X_test = test"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Split the train data to train and valid\n",
|
||||
"\n",
|
||||
"Use one month's data as valid data\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"X_train = train\n",
|
||||
"y_train = X_train.pop('demand').values\n",
|
||||
"print(X_train.shape)\n",
|
||||
"print(y_train.shape)"
|
||||
"X_train = data[data[time_column_name] < '2017-02-01']\n",
|
||||
"X_test = data[data[time_column_name] >= '2017-02-01']\n",
|
||||
"y_train = X_train.pop(target_column_name).values\n",
|
||||
"y_test = X_test.pop(target_column_name).values"
|
||||
]
|
||||
},
|
||||
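The time-based split above keeps every training row strictly before the cutoff date and every test row at or after it. Below is a minimal, self-contained sanity check of the same pattern; the DataFrame here is hypothetical and merely stands in for the notebook's `data`.

    import pandas as pd

    # hypothetical stand-in for the notebook's `data` DataFrame
    data = pd.DataFrame({
        'timeStamp': pd.date_range('2017-01-30', periods=96, freq='H'),
        'demand': range(96),
    })
    split_date = pd.Timestamp('2017-02-01')
    X_train = data[data['timeStamp'] < split_date].copy()
    X_test = data[data['timeStamp'] >= split_date].copy()
    y_train = X_train.pop('demand').values
    y_test = X_test.pop('demand').values
    # no training timestamp may reach the cutoff
    assert X_train['timeStamp'].max() < split_date

One detail worth noting: `pop` both removes the target column from the features and returns it, so `X_train` and `X_test` end up target-free in a single step.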
{
|
||||
@@ -205,9 +179,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"time_column_name = 'timeStamp'\n",
|
||||
"automl_settings = {\n",
|
||||
" \"time_column_name\": time_column_name,\n",
|
||||
" \"time_column_name\": time_column_name \n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
@@ -218,7 +191,7 @@
|
||||
" iteration_timeout_minutes = 5,\n",
|
||||
" X = X_train,\n",
|
||||
" y = y_train,\n",
|
||||
" n_cross_validations = 2,\n",
|
||||
" n_cross_validations = 3,\n",
|
||||
" path=project_folder,\n",
|
||||
" verbosity = logging.INFO,\n",
|
||||
" **automl_settings)"
|
||||
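For readability, here is the configuration assembled in one place as it plausibly reads after this change. This is a sketch, not the notebook verbatim: `task`, `primary_metric`, and `iterations` are assumptions typical of forecasting notebooks from this SDK generation, while `project_folder`, `X_train`, and `y_train` come from earlier cells.

    import logging
    from azureml.train.automl import AutoMLConfig

    automl_settings = {
        "time_column_name": time_column_name
    }

    automl_config = AutoMLConfig(task='forecasting',    # assumed
                                 primary_metric='normalized_root_mean_squared_error',  # assumed
                                 iterations=10,         # assumed
                                 iteration_timeout_minutes=5,
                                 X=X_train,
                                 y=y_train,
                                 n_cross_validations=3,
                                 path=project_folder,
                                 verbosity=logging.INFO,
                                 **automl_settings)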
@@ -228,7 +201,8 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can call the submit method on the experiment object and pass the run configuration. For Local runs the execution is synchronous. Depending on the data and number of iterations this can run for while.\n",
|
||||
"Submitting the configuration will start a new run in this experiment. For local runs, the execution is synchronous. Depending on the data and number of iterations, this can run for a while. Parameters controlling concurrency may speed up the process, depending on your hardware.\n",
|
||||
"\n",
|
||||
"You will see the currently running iterations printing to the console."
|
||||
]
|
||||
},
|
||||
@@ -285,35 +259,17 @@
|
||||
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### View the featurization summary\n",
|
||||
"Below we display the featurization that was performed on different raw features in the user data. For each raw feature in the user data, the following information is displayed:-\n",
|
||||
"- Raw feature name\n",
|
||||
"- Number of engineered features formed out of this raw feature\n",
|
||||
"- Type detected\n",
|
||||
"- If feature was dropped\n",
|
||||
"- List of feature transformations for the raw feature"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Test the Best Fitted Model\n",
|
||||
"\n",
|
||||
"Predict on training and test set, and calculate residual values."
|
||||
"For forecasting, we will use the `forecast` function instead of the `predict` function. There are two reasons for this.\n",
|
||||
"\n",
|
||||
"We need to pass the recent values of the target variable `y`, whereas the scikit-compatible `predict` function only takes the non-target variables `X`. In our case, the test data immediately follows the training data, and we fill the `y` variable with `NaN`. The `NaN` serves as a question mark for the forecaster to fill with the actuals. Using the forecast function will produce forecasts using the shortest possible forecast horizon. The last time at which a definite (non-NaN) value is seen is the _forecast origin_ - the last time when the value of the target is known. \n",
|
||||
"\n",
|
||||
"Using the `predict` method would result in getting predictions for EVERY horizon the forecaster can predict at. This is useful when training and evaluating the performance of the forecaster at various horizons, but the level of detail is excessive for normal use."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -322,15 +278,64 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_pred = fitted_model.predict(X_test)\n",
|
||||
"y_pred"
|
||||
"# Replace ALL values in y_pred by NaN. \n",
|
||||
"# The forecast origin will be at the beginning of the first forecast period\n",
|
||||
"# (which is the same time as the end of the last training period).\n",
|
||||
"y_query = y_test.copy().astype(np.float)\n",
|
||||
"y_query.fill(np.nan)\n",
|
||||
"# The featurized data, aligned to y, will also be returned.\n",
|
||||
"# This contains the assumptions that were made in the forecast\n",
|
||||
"# and helps align the forecast to the original data\n",
|
||||
"y_fcst, X_trans = fitted_model.forecast(X_test, y_query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# limit the evaluation to data where y_test has actuals\n",
|
||||
"def align_outputs(y_predicted, X_trans, X_test, y_test, predicted_column_name = 'predicted'):\n",
|
||||
" \"\"\"\n",
|
||||
" Demonstrates how to get the output aligned to the inputs\n",
|
||||
" using pandas indexes. Helps understand what happened if\n",
|
||||
" the output's shape differs from the input shape, or if\n",
|
||||
" the data got re-sorted by time and grain during forecasting.\n",
|
||||
" \n",
|
||||
" Typical causes of misalignment are:\n",
|
||||
" * we predicted some periods that were missing in actuals -> drop from eval\n",
|
||||
" * model was asked to predict past max_horizon -> increase max horizon\n",
|
||||
" * data at start of X_test was needed for lags -> provide previous periods\n",
|
||||
" \"\"\"\n",
|
||||
" df_fcst = pd.DataFrame({predicted_column_name : y_predicted})\n",
|
||||
" # y and X outputs are aligned by forecast() function contract\n",
|
||||
" df_fcst.index = X_trans.index\n",
|
||||
" \n",
|
||||
" # align original X_test to y_test \n",
|
||||
" X_test_full = X_test.copy()\n",
|
||||
" X_test_full[target_column_name] = y_test\n",
|
||||
"\n",
|
||||
" # X_test_full's does not include origin, so reset for merge\n",
|
||||
" df_fcst.reset_index(inplace=True)\n",
|
||||
" X_test_full = X_test_full.reset_index().drop(columns='index')\n",
|
||||
" together = df_fcst.merge(X_test_full, how='right')\n",
|
||||
" \n",
|
||||
" # drop rows where prediction or actuals are nan \n",
|
||||
" # happens because of missing actuals \n",
|
||||
" # or at edges of time due to lags/rolling windows\n",
|
||||
" clean = together[together[[target_column_name, predicted_column_name]].notnull().all(axis=1)]\n",
|
||||
" return(clean)\n",
|
||||
"\n",
|
||||
"df_all = align_outputs(y_fcst, X_trans, X_test, y_test)\n",
|
||||
"df_all.head()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Use the Check Data Function to remove the nan values from y_test to avoid error when calculate metrics "
|
||||
"Looking at `X_trans` is also useful to see what featurization happened to the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -339,29 +344,14 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if len(y_test) != len(y_pred):\n",
|
||||
" raise ValueError(\n",
|
||||
" 'the true values and prediction values do not have equal length.')\n",
|
||||
"elif len(y_test) == 0:\n",
|
||||
" raise ValueError(\n",
|
||||
" 'y_true and y_pred are empty.')\n",
|
||||
"\n",
|
||||
"# if there is any non-numeric element in the y_true or y_pred,\n",
|
||||
"# the ValueError exception will be thrown.\n",
|
||||
"y_test_f = np.array(y_test).astype(float)\n",
|
||||
"y_pred_f = np.array(y_pred).astype(float)\n",
|
||||
"\n",
|
||||
"# remove entries both in y_true and y_pred where at least\n",
|
||||
"# one element in y_true or y_pred is missing\n",
|
||||
"y_test = y_test_f[~(np.isnan(y_test_f) | np.isnan(y_pred_f))]\n",
|
||||
"y_pred = y_pred_f[~(np.isnan(y_test_f) | np.isnan(y_pred_f))]"
|
||||
"X_trans"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Calculate metrics for the prediction\n"
|
||||
"### Calculate accuracy metrics\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -370,26 +360,180 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"[Test Data] \\nRoot Mean squared error: %.2f\" % np.sqrt(mean_squared_error(y_test, y_pred)))\n",
|
||||
"# Explained variance score: 1 is perfect prediction\n",
|
||||
"print('mean_absolute_error score: %.2f' % mean_absolute_error(y_test, y_pred))\n",
|
||||
"print('R2 score: %.2f' % r2_score(y_test, y_pred))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def MAPE(actual, pred):\n",
|
||||
" \"\"\"\n",
|
||||
" Calculate mean absolute percentage error.\n",
|
||||
" Remove NA and values where actual is close to zero\n",
|
||||
" \"\"\"\n",
|
||||
" not_na = ~(np.isnan(actual) | np.isnan(pred))\n",
|
||||
" not_zero = ~np.isclose(actual, 0.0)\n",
|
||||
" actual_safe = actual[not_na & not_zero]\n",
|
||||
" pred_safe = pred[not_na & not_zero]\n",
|
||||
" APE = 100*np.abs((actual_safe - pred_safe)/actual_safe)\n",
|
||||
" return np.mean(APE)"
|
||||
]
|
||||
},
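For reference, a worked statement of what the `MAPE` helper above computes, where the sum runs over the $n$ rows that survive the NA and near-zero filters:

$$\mathrm{MAPE} = \frac{100}{n}\sum_{i=1}^{n}\left|\frac{y_i - \hat{y}_i}{y_i}\right|$$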
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"Simple forecasting model\")\n",
|
||||
"rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all['predicted']))\n",
|
||||
"print(\"[Test Data] \\nRoot Mean squared error: %.2f\" % rmse)\n",
|
||||
"mae = mean_absolute_error(df_all[target_column_name], df_all['predicted'])\n",
|
||||
"print('mean_absolute_error score: %.2f' % mae)\n",
|
||||
"print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted']))\n",
|
||||
"\n",
|
||||
"# Plot outputs\n",
|
||||
"%matplotlib notebook\n",
|
||||
"test_pred = plt.scatter(y_test, y_pred, color='b')\n",
|
||||
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
|
||||
"test_test = plt.scatter(y_test, y_test, color='g')\n",
|
||||
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The distribution looks a little heavy tailed: we underestimate the excursions of the extremes. A normal-quantile transform of the target might help, but let's first try using some past data with the lags and rolling window transforms.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using lags and rolling window features to improve the forecast"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, grain and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data.\n",
|
||||
"\n",
|
||||
"Now that we configured target lags, that is the previous values of the target variables, and the prediction is no longer horizon-less. We therefore must specify the `max_horizon` that the model will learn to forecast. The `target_lags` keyword specifies how far back we will construct the lags of the target variable, and the `target_rolling_window_size` specifies the size of the rolling window over which we will generate the `max`, `min` and `sum` features."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_settings_lags = {\n",
|
||||
" 'time_column_name': time_column_name,\n",
|
||||
" 'target_lags': 1,\n",
|
||||
" 'target_rolling_window_size': 5,\n",
|
||||
" # you MUST set the max_horizon when using lags and rolling windows\n",
|
||||
" # it is optional when looking-back features are not used \n",
|
||||
" 'max_horizon': len(y_test), # only one grain\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"automl_config_lags = AutoMLConfig(task = 'forecasting',\n",
|
||||
" debug_log = 'automl_nyc_energy_errors.log',\n",
|
||||
" primary_metric='normalized_root_mean_squared_error',\n",
|
||||
" iterations = 10,\n",
|
||||
" iteration_timeout_minutes = 5,\n",
|
||||
" X = X_train,\n",
|
||||
" y = y_train,\n",
|
||||
" n_cross_validations = 3,\n",
|
||||
" path=project_folder,\n",
|
||||
" verbosity = logging.INFO,\n",
|
||||
" **automl_settings_lags)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"local_run_lags = experiment.submit(automl_config_lags, show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"best_run_lags, fitted_model_lags = local_run_lags.get_output()\n",
|
||||
"y_fcst_lags, X_trans_lags = fitted_model_lags.forecast(X_test, y_query)\n",
|
||||
"df_lags = align_outputs(y_fcst_lags, X_trans_lags, X_test, y_test)\n",
|
||||
"df_lags.head()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"X_trans_lags"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"Forecasting model with lags\")\n",
|
||||
"rmse = np.sqrt(mean_squared_error(df_lags[target_column_name], df_lags['predicted']))\n",
|
||||
"print(\"[Test Data] \\nRoot Mean squared error: %.2f\" % rmse)\n",
|
||||
"mae = mean_absolute_error(df_lags[target_column_name], df_lags['predicted'])\n",
|
||||
"print('mean_absolute_error score: %.2f' % mae)\n",
|
||||
"print('MAPE: %.2f' % MAPE(df_lags[target_column_name], df_lags['predicted']))\n",
|
||||
"\n",
|
||||
"# Plot outputs\n",
|
||||
"%matplotlib notebook\n",
|
||||
"test_pred = plt.scatter(df_lags[target_column_name], df_lags['predicted'], color='b')\n",
|
||||
"test_test = plt.scatter(y_test, y_test, color='g')\n",
|
||||
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### What features matter for the forecast?"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.train.automl.automlexplainer import explain_model\n",
|
||||
"\n",
|
||||
"# feature names are everything in the transformed data except the target\n",
|
||||
"features = X_trans.columns[:-1]\n",
|
||||
"expl = explain_model(fitted_model, X_train, X_test, features = features, best_run=best_run_lags, y_train = y_train)\n",
|
||||
"# unpack the tuple\n",
|
||||
"shap_values, expected_values, feat_overall_imp, feat_names, per_class_summary, per_class_imp = expl\n",
|
||||
"best_run_lags"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Please go to the Azure Portal's best run to see the top features chart.\n",
|
||||
"\n",
|
||||
"The informative features make all sorts of intuitive sense. Temperature is a strong driver of heating and cooling demand in NYC. Apart from that, the daily life cycle, expressed by `hour`, and the weekly cycle, expressed by `wday` drives people's energy use habits."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "xiaga"
|
||||
"name": "xiaga, tosingli"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
@@ -407,7 +551,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
"version": "3.6.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -20,7 +20,9 @@
|
||||
"1. [Introduction](#Introduction)\n",
|
||||
"1. [Setup](#Setup)\n",
|
||||
"1. [Data](#Data)\n",
|
||||
"1. [Train](#Train)"
|
||||
"1. [Train](#Train)\n",
|
||||
"1. [Predict](#Predict)\n",
|
||||
"1. [Operationalize](#Operationalize)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -36,8 +38,7 @@
|
||||
"1. Create an Experiment in an existing Workspace\n",
|
||||
"2. Instantiate an AutoMLConfig \n",
|
||||
"3. Find and train a forecasting model using local compute\n",
|
||||
"4. Viewing the engineered names for featurized data and featurization summary for all raw features\n",
|
||||
"5. Evaluate the performance of the model\n",
|
||||
"4. Evaluate the performance of the model\n",
|
||||
"\n",
|
||||
"The examples in the follow code samples use the University of Chicago's Dominick's Finer Foods dataset to forecast orange juice sales. Dominick's was a grocery chain in the Chicago metropolitan area."
|
||||
]
|
||||
@@ -86,9 +87,9 @@
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# choose a name for the run history container in the workspace\n",
|
||||
"experiment_name = 'automl-ojsalesforecasting'\n",
|
||||
"experiment_name = 'automl-ojforecasting'\n",
|
||||
"# project folder\n",
|
||||
"project_folder = './sample_projects/automl-local-ojsalesforecasting'\n",
|
||||
"project_folder = './sample_projects/automl-local-ojforecasting'\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
@@ -261,12 +262,12 @@
|
||||
" 'time_column_name': time_column_name,\n",
|
||||
" 'grain_column_names': grain_column_names,\n",
|
||||
" 'drop_column_names': ['logQuantity'],\n",
|
||||
" 'max_horizon': n_test_periods\n",
|
||||
" 'max_horizon': n_test_periods # optional\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task='forecasting',\n",
|
||||
" debug_log='automl_oj_sales_errors.log',\n",
|
||||
" primary_metric='normalized_root_mean_squared_error',\n",
|
||||
" primary_metric='normalized_mean_absolute_error',\n",
|
||||
" iterations=10,\n",
|
||||
" X=X_train,\n",
|
||||
" y=y_train,\n",
|
||||
@@ -294,15 +295,6 @@
|
||||
"local_run = experiment.submit(automl_config, show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"local_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -325,46 +317,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### View the engineered names for featurized data\n",
|
||||
"Below we display the engineered feature names generated for the featurized data using the time-series featurization."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fitted_pipeline.named_steps['timeseriestransformer'].get_engineered_feature_names()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### View the featurization summary\n",
|
||||
"Below we display the featurization that was performed on different raw features in the user data. For each raw feature in the user data, the following information is displayed:-\n",
|
||||
"- Raw feature name\n",
|
||||
"- Number of engineered features formed out of this raw feature\n",
|
||||
"- Type detected\n",
|
||||
"- If feature was dropped\n",
|
||||
"- List of feature transformations for the raw feature"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fitted_pipeline.named_steps['timeseriestransformer'].get_featurization_summary()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Make Predictions from the Best Fitted Model\n",
|
||||
"# Predict\n",
|
||||
"Now that we have retrieved the best pipeline/model, it can be used to make predictions on test data. First, we remove the target values from the test set:"
|
||||
]
|
||||
},
|
||||
@@ -392,7 +345,7 @@
|
||||
"source": [
|
||||
"To produce predictions on the test set, we need to know the feature values at all dates in the test set. This requirement is somewhat reasonable for the OJ sales data since the features mainly consist of price, which is usually set in advance, and customer demographics which are approximately constant for each store over the 20 week forecast horizon in the testing data. \n",
|
||||
"\n",
|
||||
"The target predictions can be retrieved by calling the `predict` method on the best model:"
|
||||
"We will first create a query `y_query`, which is aligned index-for-index to `X_test`. This is a vector of target values where each `NaN` serves the function of the question mark to be replaced by forecast. Passing definite values in the `y` argument allows the `forecast` function to make predictions on data that does not immediately follow the train data which contains `y`. In each grain, the last time point where the model sees a definite value of `y` is that grain's _forecast origin_."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -401,15 +354,76 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_pred = fitted_pipeline.predict(X_test)"
|
||||
"# Replace ALL values in y_pred by NaN.\n",
|
||||
"# The forecast origin will be at the beginning of the first forecast period.\n",
|
||||
"# (Which is the same time as the end of the last training period.)\n",
|
||||
"y_query = y_test.copy().astype(np.float)\n",
|
||||
"y_query.fill(np.nan)\n",
|
||||
"# The featurized data, aligned to y, will also be returned.\n",
|
||||
"# This contains the assumptions that were made in the forecast\n",
|
||||
"# and helps align the forecast to the original data\n",
|
||||
"y_pred, X_trans = fitted_pipeline.forecast(X_test, y_query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Calculate evaluation metrics for the prediction\n",
|
||||
"To evaluate the accuracy of the forecast, we'll compare against the actual sales quantities for some select metrics, included the mean absolute percentage error (MAPE)."
|
||||
"If you are used to scikit pipelines, perhaps you expected `predict(X_test)`. However, forecasting requires a more general interface that also supplies the past target `y` values. Please use `forecast(X,y)` as `predict(X)` is reserved for internal purposes on forecasting models.\n",
|
||||
"\n",
|
||||
"The [energy demand forecasting notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand) demonstrates the use of the forecast function in more detail in the context of using lags and rolling window features. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Evaluate\n",
|
||||
"\n",
|
||||
"To evaluate the accuracy of the forecast, we'll compare against the actual sales quantities for some select metrics, included the mean absolute percentage error (MAPE). \n",
|
||||
"\n",
|
||||
"It is a good practice to always align the output explicitly to the input, as the count and order of the rows may have changed during transformations that span multiple rows."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def align_outputs(y_predicted, X_trans, X_test, y_test, predicted_column_name = 'predicted'):\n",
|
||||
" \"\"\"\n",
|
||||
" Demonstrates how to get the output aligned to the inputs\n",
|
||||
" using pandas indexes. Helps understand what happened if\n",
|
||||
" the output's shape differs from the input shape, or if\n",
|
||||
" the data got re-sorted by time and grain during forecasting.\n",
|
||||
" \n",
|
||||
" Typical causes of misalignment are:\n",
|
||||
" * we predicted some periods that were missing in actuals -> drop from eval\n",
|
||||
" * model was asked to predict past max_horizon -> increase max horizon\n",
|
||||
" * data at start of X_test was needed for lags -> provide previous periods in y\n",
|
||||
" \"\"\"\n",
|
||||
" \n",
|
||||
" df_fcst = pd.DataFrame({predicted_column_name : y_predicted})\n",
|
||||
" # y and X outputs are aligned by forecast() function contract\n",
|
||||
" df_fcst.index = X_trans.index\n",
|
||||
" \n",
|
||||
" # align original X_test to y_test \n",
|
||||
" X_test_full = X_test.copy()\n",
|
||||
" X_test_full[target_column_name] = y_test\n",
|
||||
"\n",
|
||||
" # X_test_full's index does not include origin, so reset for merge\n",
|
||||
" df_fcst.reset_index(inplace=True)\n",
|
||||
" X_test_full = X_test_full.reset_index().drop(columns='index')\n",
|
||||
" together = df_fcst.merge(X_test_full, how='right')\n",
|
||||
" \n",
|
||||
" # drop rows where prediction or actuals are nan \n",
|
||||
" # happens because of missing actuals \n",
|
||||
" # or at edges of time due to lags/rolling windows\n",
|
||||
" clean = together[together[[target_column_name, predicted_column_name]].notnull().all(axis=1)]\n",
|
||||
" return(clean)\n",
|
||||
"\n",
|
||||
"df_all = align_outputs(y_pred, X_trans, X_test, y_test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -428,18 +442,392 @@
|
||||
" actual_safe = actual[not_na & not_zero]\n",
|
||||
" pred_safe = pred[not_na & not_zero]\n",
|
||||
" APE = 100*np.abs((actual_safe - pred_safe)/actual_safe)\n",
|
||||
" return np.mean(APE)\n",
|
||||
" return np.mean(APE)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"Simple forecasting model\")\n",
|
||||
"rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all['predicted']))\n",
|
||||
"print(\"[Test Data] \\nRoot Mean squared error: %.2f\" % rmse)\n",
|
||||
"mae = mean_absolute_error(df_all[target_column_name], df_all['predicted'])\n",
|
||||
"print('mean_absolute_error score: %.2f' % mae)\n",
|
||||
"print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted']))\n",
|
||||
"\n",
|
||||
"print(\"[Test Data] \\nRoot Mean squared error: %.2f\" % np.sqrt(mean_squared_error(y_test, y_pred)))\n",
|
||||
"print('mean_absolute_error score: %.2f' % mean_absolute_error(y_test, y_pred))\n",
|
||||
"print('MAPE: %.2f' % MAPE(y_test, y_pred))"
|
||||
"# Plot outputs\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"\n",
|
||||
"%matplotlib notebook\n",
|
||||
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
|
||||
"test_test = plt.scatter(y_test, y_test, color='g')\n",
|
||||
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Operationalize"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"_Operationalization_ means getting the model into the cloud so that other can run it after you close the notebook. We will create a docker running on Azure Container Instances with the model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"description = 'AutoML OJ forecaster'\n",
|
||||
"tags = None\n",
|
||||
"model = local_run.register_model(description = description, tags = tags)\n",
|
||||
"\n",
|
||||
"print(local_run.model_id)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Develop the scoring script\n",
|
||||
"\n",
|
||||
"Serializing and deserializing complex data frames may be tricky. We first develop the `run()` function of the scoring script locally, then write it into a scoring script. It is much easier to debug any quirks of the scoring function without crossing two compute environments. For this exercise, we handle a common quirk of how pandas dataframes serialize time stamp values."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# this is where we test the run function of the scoring script interactively\n",
|
||||
"# before putting it in the scoring script\n",
|
||||
"\n",
|
||||
"timestamp_columns = ['WeekStarting']\n",
|
||||
"\n",
|
||||
"def run(rawdata, test_model = None):\n",
|
||||
" \"\"\"\n",
|
||||
" Intended to process 'rawdata' string produced by\n",
|
||||
" \n",
|
||||
" {'X': X_test.to_json(), y' : y_test.to_json()}\n",
|
||||
" \n",
|
||||
" Don't convert the X payload to numpy.array, use it as pandas.DataFrame\n",
|
||||
" \"\"\"\n",
|
||||
" try:\n",
|
||||
" # unpack the data frame with timestamp \n",
|
||||
" rawobj = json.loads(rawdata) # rawobj is now a dict of strings \n",
|
||||
" X_pred = pd.read_json(rawobj['X'], convert_dates=False) # load the pandas DF from a json string\n",
|
||||
" for col in timestamp_columns: # fix timestamps\n",
|
||||
" X_pred[col] = pd.to_datetime(X_pred[col], unit='ms') \n",
|
||||
" \n",
|
||||
" y_pred = np.array(rawobj['y']) # reconstitute numpy array from serialized list\n",
|
||||
" \n",
|
||||
" if test_model is None:\n",
|
||||
" result = model.forecast(X_pred, y_pred) # use the global model from init function\n",
|
||||
" else:\n",
|
||||
" result = test_model.forecast(X_pred, y_pred) # use the model on which we are testing\n",
|
||||
" \n",
|
||||
" except Exception as e:\n",
|
||||
" result = str(e)\n",
|
||||
" return json.dumps({\"error\": result})\n",
|
||||
" \n",
|
||||
" forecast_as_list = result[0].tolist()\n",
|
||||
" index_as_df = result[1].index.to_frame().reset_index(drop=True)\n",
|
||||
" \n",
|
||||
" return json.dumps({\"forecast\": forecast_as_list, # return the minimum over the wire: \n",
|
||||
" \"index\": index_as_df.to_json() # no forecast and its featurized values\n",
|
||||
" })"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# test the run function here before putting in the scoring script\n",
|
||||
"import json\n",
|
||||
"\n",
|
||||
"test_sample = json.dumps({'X': X_test.to_json(), 'y' : y_query.tolist()})\n",
|
||||
"response = run(test_sample, fitted_pipeline)\n",
|
||||
"\n",
|
||||
"# unpack the response, dealing with the timestamp serialization again\n",
|
||||
"res_dict = json.loads(response)\n",
|
||||
"y_fcst_all = pd.read_json(res_dict['index'])\n",
|
||||
"y_fcst_all[time_column_name] = pd.to_datetime(y_fcst_all[time_column_name], unit = 'ms')\n",
|
||||
"y_fcst_all['forecast'] = res_dict['forecast']\n",
|
||||
"y_fcst_all.head()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now that the function works locally in the notebook, let's write it down into the scoring script. The scoring script is authored by the data scientist. Adjust it to taste, adding inputs, outputs and processing as needed."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%writefile score_fcast.py\n",
|
||||
"import pickle\n",
|
||||
"import json\n",
|
||||
"import numpy as np\n",
|
||||
"import pandas as pd\n",
|
||||
"import azureml.train.automl\n",
|
||||
"from sklearn.externals import joblib\n",
|
||||
"from azureml.core.model import Model\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def init():\n",
|
||||
" global model\n",
|
||||
" model_path = Model.get_model_path(model_name = '<<modelid>>') # this name is model.id of model that we want to deploy\n",
|
||||
" # deserialize the model file back into a sklearn model\n",
|
||||
" model = joblib.load(model_path)\n",
|
||||
"\n",
|
||||
"timestamp_columns = ['WeekStarting']\n",
|
||||
"\n",
|
||||
"def run(rawdata, test_model = None):\n",
|
||||
" \"\"\"\n",
|
||||
" Intended to process 'rawdata' string produced by\n",
|
||||
" \n",
|
||||
" {'X': X_test.to_json(), y' : y_test.to_json()}\n",
|
||||
" \n",
|
||||
" Don't convert the X payload to numpy.array, use it as pandas.DataFrame\n",
|
||||
" \"\"\"\n",
|
||||
" try:\n",
|
||||
" # unpack the data frame with timestamp \n",
|
||||
" rawobj = json.loads(rawdata) # rawobj is now a dict of strings \n",
|
||||
" X_pred = pd.read_json(rawobj['X'], convert_dates=False) # load the pandas DF from a json string\n",
|
||||
" for col in timestamp_columns: # fix timestamps\n",
|
||||
" X_pred[col] = pd.to_datetime(X_pred[col], unit='ms') \n",
|
||||
" \n",
|
||||
" y_pred = np.array(rawobj['y']) # reconstitute numpy array from serialized list\n",
|
||||
" \n",
|
||||
" if test_model is None:\n",
|
||||
" result = model.forecast(X_pred, y_pred) # use the global model from init function\n",
|
||||
" else:\n",
|
||||
" result = test_model.forecast(X_pred, y_pred) # use the model on which we are testing\n",
|
||||
" \n",
|
||||
" except Exception as e:\n",
|
||||
" result = str(e)\n",
|
||||
" return json.dumps({\"error\": result})\n",
|
||||
" \n",
|
||||
" # prepare to send over wire as json\n",
|
||||
" forecast_as_list = result[0].tolist()\n",
|
||||
" index_as_df = result[1].index.to_frame().reset_index(drop=True)\n",
|
||||
" \n",
|
||||
" return json.dumps({\"forecast\": forecast_as_list, # return the minimum over the wire: \n",
|
||||
" \"index\": index_as_df.to_json() # no forecast and its featurized values\n",
|
||||
" })"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# get the model\n",
|
||||
"from azureml.train.automl.run import AutoMLRun\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"ml_run = AutoMLRun(experiment = experiment, run_id = local_run.id)\n",
|
||||
"best_iteration = int(str.split(best_run.id,'_')[-1]) # the iteration number is a postfix of the run ID."
|
||||
]
|
||||
},
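For example, a hypothetical run ID such as `AutoML_0a1b2c3d-0000-0000-0000-000000000000_7` would yield `best_iteration == 7`.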
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# get the best model's dependencies and write them into this file\n",
|
||||
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||
"\n",
|
||||
"conda_env_file_name = 'fcast_env.yml'\n",
|
||||
"\n",
|
||||
"dependencies = ml_run.get_run_sdk_dependencies(iteration = best_iteration)\n",
|
||||
"for p in ['azureml-train-automl', 'azureml-sdk', 'azureml-core']:\n",
|
||||
" print('{}\\t{}'.format(p, dependencies[p]))\n",
|
||||
"\n",
|
||||
"myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'], pip_packages=['azureml-sdk[automl]'])\n",
|
||||
"\n",
|
||||
"myenv.save_to_file('.', conda_env_file_name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# this is the script file name we wrote a few cells above\n",
|
||||
"script_file_name = 'score_fcast.py'\n",
|
||||
"\n",
|
||||
"# Substitute the actual version number in the environment file.\n",
|
||||
"# This is not strictly needed in this notebook because the model should have been generated using the current SDK version.\n",
|
||||
"# However, we include this in case this code is used on an experiment from a previous SDK version.\n",
|
||||
"\n",
|
||||
"with open(conda_env_file_name, 'r') as cefr:\n",
|
||||
" content = cefr.read()\n",
|
||||
"\n",
|
||||
"with open(conda_env_file_name, 'w') as cefw:\n",
|
||||
" cefw.write(content.replace(azureml.core.VERSION, dependencies['azureml-sdk']))\n",
|
||||
"\n",
|
||||
"# Substitute the actual model id in the script file.\n",
|
||||
"\n",
|
||||
"with open(script_file_name, 'r') as cefr:\n",
|
||||
" content = cefr.read()\n",
|
||||
"\n",
|
||||
"with open(script_file_name, 'w') as cefw:\n",
|
||||
" cefw.write(content.replace('<<modelid>>', local_run.model_id))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create a Container Image"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.image import Image, ContainerImage\n",
|
||||
"\n",
|
||||
"image_config = ContainerImage.image_configuration(runtime= \"python\",\n",
|
||||
" execution_script = script_file_name,\n",
|
||||
" conda_file = conda_env_file_name,\n",
|
||||
" tags = {'type': \"automl-forecasting\"},\n",
|
||||
" description = \"Image for automl forecasting sample\")\n",
|
||||
"\n",
|
||||
"image = Image.create(name = \"automl-fcast-image\",\n",
|
||||
" # this is the model object \n",
|
||||
" models = [model],\n",
|
||||
" image_config = image_config, \n",
|
||||
" workspace = ws)\n",
|
||||
"\n",
|
||||
"image.wait_for_creation(show_output = True)\n",
|
||||
"\n",
|
||||
"if image.creation_state == 'Failed':\n",
|
||||
" print(\"Image build log at: \" + image.image_build_log_uri)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Deploy the Image as a Web Service on Azure Container Instance"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.webservice import AciWebservice\n",
|
||||
"\n",
|
||||
"aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1, \n",
|
||||
" memory_gb = 2, \n",
|
||||
" tags = {'type': \"automl-forecasting\"},\n",
|
||||
" description = \"Automl forecasting sample service\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.webservice import Webservice\n",
|
||||
"\n",
|
||||
"aci_service_name = 'automl-forecast-01'\n",
|
||||
"print(aci_service_name)\n",
|
||||
"\n",
|
||||
"aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,\n",
|
||||
" image = image,\n",
|
||||
" name = aci_service_name,\n",
|
||||
" workspace = ws)\n",
|
||||
"aci_service.wait_for_deployment(True)\n",
|
||||
"print(aci_service.state)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Call the service"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# we send the data to the service serialized into a json string\n",
|
||||
"test_sample = json.dumps({'X':X_test.to_json(), 'y' : y_query.tolist()})\n",
|
||||
"response = aci_service.run(input_data = test_sample)\n",
|
||||
"\n",
|
||||
"# translate from networkese to datascientese\n",
|
||||
"try: \n",
|
||||
" res_dict = json.loads(response)\n",
|
||||
" y_fcst_all = pd.read_json(res_dict['index'])\n",
|
||||
" y_fcst_all[time_column_name] = pd.to_datetime(y_fcst_all[time_column_name], unit = 'ms')\n",
|
||||
" y_fcst_all['forecast'] = res_dict['forecast'] \n",
|
||||
"except:\n",
|
||||
" print(res_dict)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_fcst_all.head()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Delete the web service if desired"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"serv = Webservice(ws, 'automl-forecast-01')\n",
|
||||
"# serv.delete() # don't do it accidentally"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "erwright"
|
||||
"name": "erwright, tosingli"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
@@ -457,7 +845,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
"version": "3.6.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -254,7 +254,9 @@
|
||||
"3.\toverall_summary: The model level feature importance values sorted in descending order\n",
|
||||
"4.\toverall_imp: The feature names sorted in the same order as in overall_summary\n",
|
||||
"5.\tper_class_summary: The class level feature importance values sorted in descending order. Only available for the classification case\n",
|
||||
"6.\tper_class_imp: The feature names sorted in the same order as in per_class_summary. Only available for the classification case"
|
||||
"6.\tper_class_imp: The feature names sorted in the same order as in per_class_summary. Only available for the classification case\n",
|
||||
"\n",
|
||||
"Note:- The **retrieve_model_explanation()** API only works in case AutoML has been configured with **'model_explainability'** flag set to **True**. "
|
||||
]
|
||||
},
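A minimal sketch of retrieving these values (assuming `best_run` is a child run of an AutoML experiment that was submitted with `model_explainability=True`; adjust the run object to your own):

```python
from azureml.train.automl.automlexplainer import retrieve_model_explanation

# assumes the AutoML run was configured with model_explainability=True
shap_values, expected_values, overall_summary, overall_imp, \
    per_class_summary, per_class_imp = retrieve_model_explanation(best_run)

print(overall_summary)  # model-level feature importance values, descending
print(overall_imp)      # feature names in the same order
```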
|
||||
{
|
||||
|
||||
@@ -111,7 +111,7 @@
|
||||
"source": [
|
||||
"### Attach a Remote Linux DSVM\n",
|
||||
"To use a remote Docker compute target:\n",
|
||||
"1. Create a Linux DSVM in Azure, following these [quick instructions](https://docs.microsoft.com/en-us/azure/machine-learning/desktop-workbench/how-to-create-dsvm-hdi). Make sure you use the Ubuntu flavor (not CentOS). Make sure that disk space is available under `/tmp` because AutoML creates files under `/tmp/azureml_run`s. The DSVM should have more cores than the number of parallel runs that you plan to enable. It should also have at least 4GB per core.\n",
|
||||
"1. Create a Linux DSVM in Azure, following these [instructions](https://docs.microsoft.com/en-us/azure/machine-learning/data-science-virtual-machine/dsvm-ubuntu-intro). Make sure you use the Ubuntu flavor (not CentOS). Make sure that disk space is available under `/tmp` because AutoML creates files under `/tmp/azureml_run`s. The DSVM should have more cores than the number of parallel runs that you plan to enable. It should also have at least 4GB per core.\n",
|
||||
"2. Enter the IP address, user name and password below.\n",
|
||||
"\n",
|
||||
"**Note:** By default, SSH runs on port 22 and you don't need to change the port number below. If you've configured SSH to use a different port, change `dsvm_ssh_port` accordinglyaddress. [Read more](https://docs.microsoft.com/en-us/azure/virtual-machines/troubleshooting/detailed-troubleshoot-ssh-connection) on changing SSH ports for security reasons."
|
||||
|
||||
@@ -51,3 +51,5 @@ provided below for:
|
||||
|
||||
Create HDI cluster:
|
||||
<https://docs.microsoft.com/en-us/azure/hdinsight/hdinsight-hadoop-provision-linux-clusters>
|
||||
|
||||

|
||||
|
||||
@@ -8,7 +8,14 @@
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
|
||||
12
how-to-use-azureml/deploy-to-cloud/README.md
Normal file
@@ -0,0 +1,12 @@
|
||||
# Model Deployment with Azure ML service
|
||||
You can use Azure Machine Learning to package, debug, validate and deploy inference containers to a variety of compute targets. This process is known as "MLOps" (ML operationalization).
|
||||
For more information please check out this article: https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where
|
||||
|
||||
## Get Started
|
||||
To begin, you will need an ML workspace.
|
||||
For more information please check out this article: https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-workspace
|
||||
|
||||
## Deploy to the cloud
|
||||
You can deploy to the cloud using the Azure ML CLI or the Azure ML SDK.
|
||||
- CLI example: https://aka.ms/azmlcli
|
||||
- Notebook example: [model-register-and-deploy](./model-register-and-deploy.ipynb).
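As a minimal SDK sketch (assuming a model already registered as `sklearn_regression_model.pkl` and the `score.py`/`myenv.yml` files from this folder; names are illustrative):

```python
from azureml.core import Workspace
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import AciWebservice

ws = Workspace.from_config()  # reads the workspace config saved by the configuration notebook

model = Model(ws, name='sklearn_regression_model.pkl')  # assumed to be registered already

inference_config = InferenceConfig(runtime='python',
                                   entry_script='score.py',
                                   conda_file='myenv.yml')
deployment_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)

service = Model.deploy(ws, 'my-aci-service', [model], inference_config, deployment_config)
service.wait_for_deployment(show_output=True)
print(service.scoring_uri)
```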
|
||||
1
how-to-use-azureml/deploy-to-cloud/helloworld.txt
Normal file
@@ -0,0 +1 @@
|
||||
RUN echo "this is test"
|
||||
@@ -0,0 +1,282 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Register Model and deploy as Webservice\n",
|
||||
"\n",
|
||||
"This example shows how to deploy a Webservice in step-by-step fashion:\n",
|
||||
"\n",
|
||||
" 1. Register Model\n",
|
||||
" 2. Deploy Model as Webservice"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites\n",
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration](../../configuration.ipynb) Notebook first if you haven't already to establish your connection to the AzureML Workspace."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Check core SDK version number\n",
|
||||
"import azureml.core\n",
|
||||
"\n",
|
||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize Workspace\n",
|
||||
"\n",
|
||||
"Initialize a workspace object from persisted configuration."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"create workspace"
|
||||
]
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Workspace\n",
|
||||
"\n",
|
||||
"ws = Workspace.from_config()\n",
|
||||
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Register Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can add tags and descriptions to your Models. Note you need to have a `sklearn_regression_model.pkl` file in the current directory. This file is generated by the 01 notebook. The below call registers that file as a Model with the same name `sklearn_regression_model.pkl` in the workspace.\n",
|
||||
"\n",
|
||||
"Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"register model from file"
|
||||
]
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.model import Model\n",
|
||||
"\n",
|
||||
"model = Model.register(model_path = \"sklearn_regression_model.pkl\",\n",
|
||||
" model_name = \"sklearn_regression_model.pkl\",\n",
|
||||
" tags = {'area': \"diabetes\", 'type': \"regression\"},\n",
|
||||
" description = \"Ridge regression model to predict diabetes\",\n",
|
||||
" workspace = ws)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create Inference Configuration\n",
|
||||
"\n",
|
||||
"There is now support for a source directory, you can upload an entire folder from your local machine as dependencies for the Webservice.\n",
|
||||
"Note: in that case, your entry_script, conda_file, and extra_docker_file_steps paths are relative paths to the source_directory path.\n",
|
||||
"\n",
|
||||
"Sample code for using a source directory:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"inference_config = InferenceConfig(source_directory=\"C:/abc\",\n",
|
||||
" runtime= \"python\", \n",
|
||||
" entry_script=\"x/y/score.py\",\n",
|
||||
" conda_file=\"env/myenv.yml\", \n",
|
||||
" extra_docker_file_steps=\"helloworld.txt\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
" - source_directory = holds source path as string, this entire folder gets added in image so its really easy to access any files within this folder or subfolder\n",
|
||||
" - runtime = Which runtime to use for the image. Current supported runtimes are 'spark-py' and 'python\n",
|
||||
" - entry_script = contains logic specific to initializing your model and running predictions\n",
|
||||
" - conda_file = manages conda and python package dependencies.\n",
|
||||
" - extra_docker_file_steps = optional: any extra steps you want to inject into docker file"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"create image"
|
||||
]
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.model import InferenceConfig\n",
|
||||
"\n",
|
||||
"inference_config = InferenceConfig(runtime= \"python\", \n",
|
||||
" entry_script=\"score.py\",\n",
|
||||
" conda_file=\"myenv.yml\", \n",
|
||||
" extra_docker_file_steps=\"helloworld.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Deploy Model as Webservice on Azure Container Instance\n",
|
||||
"\n",
|
||||
"Note that the service creation can take few minutes."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.webservice import AciWebservice, Webservice\n",
|
||||
"from azureml.exceptions import WebserviceException\n",
|
||||
"\n",
|
||||
"deployment_config = AciWebservice.deploy_configuration(cpu_cores = 1, memory_gb = 1)\n",
|
||||
"aci_service_name = 'aciservice1'\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" # if you want to get existing service below is the command\n",
|
||||
" # since aci name needs to be unique in subscription deleting existing aci if any\n",
|
||||
" # we use aci_service_name to create azure aci\n",
|
||||
" service = Webservice(ws, name=aci_service_name)\n",
|
||||
" if service:\n",
|
||||
" service.delete()\n",
|
||||
"except WebserviceException as e:\n",
|
||||
" print()\n",
|
||||
"\n",
|
||||
"service = Model.deploy(ws, aci_service_name, [model], inference_config, deployment_config)\n",
|
||||
"\n",
|
||||
"service.wait_for_deployment(True)\n",
|
||||
"print(service.state)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Test web service"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"test_sample = json.dumps({'data': [\n",
|
||||
" [1,2,3,4,5,6,7,8,9,10], \n",
|
||||
" [10,9,8,7,6,5,4,3,2,1]\n",
|
||||
"]})\n",
|
||||
"\n",
|
||||
"test_sample_encoded = bytes(test_sample,encoding = 'utf8')\n",
|
||||
"prediction = service.run(input_data=test_sample_encoded)\n",
|
||||
"print(prediction)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Delete ACI to clean up"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"deploy service",
|
||||
"aci"
|
||||
]
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"service.delete()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Model Profiling\n",
|
||||
"\n",
|
||||
"you can also take advantage of profiling feature for model\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"\n",
|
||||
"profile = model.profile(ws, \"profilename\", [model], inference_config, test_sample)\n",
|
||||
"profile.wait_for_profiling(True)\n",
|
||||
"profiling_results = profile.get_results()\n",
|
||||
"print(profiling_results)\n",
|
||||
"\n",
|
||||
"```"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "aashishb"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
8
how-to-use-azureml/deploy-to-cloud/myenv.yml
Normal file
@@ -0,0 +1,8 @@
|
||||
name: project_environment
|
||||
dependencies:
|
||||
- python=3.6.2
|
||||
- pip:
|
||||
- azureml-defaults
|
||||
- scikit-learn
|
||||
- numpy
|
||||
- inference-schema[numpy-support]
|
||||
34
how-to-use-azureml/deploy-to-cloud/score.py
Normal file
@@ -0,0 +1,34 @@
|
||||
import pickle
|
||||
import json
|
||||
import numpy as np
|
||||
from sklearn.externals import joblib
|
||||
from sklearn.linear_model import Ridge
|
||||
from azureml.core.model import Model
|
||||
|
||||
from inference_schema.schema_decorators import input_schema, output_schema
|
||||
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
|
||||
|
||||
|
||||
def init():
|
||||
global model
|
||||
# note here "sklearn_regression_model.pkl" is the name of the model registered under the workspace;
|
||||
# this is a different behavior than before when the code is run locally, even though the code is the same.
|
||||
model_path = Model.get_model_path('sklearn_regression_model.pkl')
|
||||
# deserialize the model file back into a sklearn model
|
||||
model = joblib.load(model_path)
|
||||
|
||||
|
||||
input_sample = np.array([[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]])
|
||||
output_sample = np.array([3726.995])
|
||||
|
||||
|
||||
@input_schema('data', NumpyParameterType(input_sample))
|
||||
@output_schema(NumpyParameterType(output_sample))
|
||||
def run(data):
|
||||
try:
|
||||
result = model.predict(data)
|
||||
# you can return any datatype as long as it is JSON-serializable
|
||||
return result.tolist()
|
||||
except Exception as e:
|
||||
error = str(e)
|
||||
return error
|
||||
BIN
how-to-use-azureml/deploy-to-cloud/sklearn_regression_model.pkl
Normal file
Binary file not shown.
12
how-to-use-azureml/deploy-to-local/README.md
Normal file
@@ -0,0 +1,12 @@
|
||||
# Model Deployment with Azure ML service
|
||||
You can use Azure Machine Learning to package, debug, validate and deploy inference containers to a variety of compute targets. This process is known as "MLOps" (ML operationalization).
|
||||
For more information please check out this article: https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where
|
||||
|
||||
## Get Started
|
||||
To begin, you will need an ML workspace.
|
||||
For more information please check out this article: https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-workspace
|
||||
|
||||
## Deploy locally
|
||||
You can deploy a model locally for testing & debugging using the Azure ML CLI or the Azure ML SDK.
|
||||
- CLI example: https://aka.ms/azmlcli
|
||||
- Notebook example: [register-model-deploy-local](./register-model-deploy-local.ipynb).
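A minimal SDK sketch of a local deployment (assuming Docker is running on your machine, a model registered as `sklearn_regression_model.pkl`, and the `score.py`/`myenv.yml` files from this folder; names are illustrative):

```python
from azureml.core import Workspace
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import LocalWebservice

ws = Workspace.from_config()
model = Model(ws, name='sklearn_regression_model.pkl')  # assumed to be registered already

inference_config = InferenceConfig(runtime='python',
                                   entry_script='score.py',
                                   conda_file='myenv.yml')
# serve the model from a local Docker container on port 8890
deployment_config = LocalWebservice.deploy_configuration(port=8890)

local_service = Model.deploy(ws, 'local-test', [model], inference_config, deployment_config)
local_service.wait_for_deployment(show_output=True)
print('Local service port: {}'.format(local_service.port))
```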
|
||||
BIN
how-to-use-azureml/deploy-to-local/dockerSharedDrive.JPG
Normal file
Binary file not shown.
|
1
how-to-use-azureml/deploy-to-local/helloworld.txt
Normal file
@@ -0,0 +1 @@
|
||||
RUN echo "this is test"
|
||||
8
how-to-use-azureml/deploy-to-local/myenv.yml
Normal file
@@ -0,0 +1,8 @@
|
||||
name: project_environment
|
||||
dependencies:
|
||||
- python=3.6.2
|
||||
- pip:
|
||||
- azureml-defaults
|
||||
- scikit-learn
|
||||
- numpy
|
||||
- inference-schema[numpy-support]
|
||||
@@ -0,0 +1,494 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Register model and deploy locally with advanced usages\n",
|
||||
"\n",
|
||||
"This example shows how to deploy a web service in step-by-step fashion:\n",
|
||||
"\n",
|
||||
" 1. Register model\n",
|
||||
" 2. Deploy the image as a web service in a local Docker container.\n",
|
||||
" 3. Quickly test changes to your entry script by reloading the local service.\n",
|
||||
" 4. Optionally, you can also make changes to model, conda or extra_docker_file_steps and update local service"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites\n",
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise,make sure you go through the [configuration](../../../configuration.ipynb) Notebook first if you haven't."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Check core SDK version number\n",
|
||||
"import azureml.core\n",
|
||||
"\n",
|
||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize Workspace\n",
|
||||
"\n",
|
||||
"Initialize a workspace object from persisted configuration."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"create workspace"
|
||||
]
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Workspace\n",
|
||||
"\n",
|
||||
"ws = Workspace.from_config()\n",
|
||||
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Register Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can add tags and descriptions to your models. we are using `sklearn_regression_model.pkl` file in the current directory as a model with the same name `sklearn_regression_model.pkl` in the workspace.\n",
|
||||
"\n",
|
||||
"Using tags, you can track useful information such as the name and version of the machine learning library used to train the model, framework, category, target customer etc. Note that tags must be alphanumeric."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"register model from file"
|
||||
]
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.model import Model\n",
|
||||
"\n",
|
||||
"model = Model.register(model_path = \"sklearn_regression_model.pkl\",\n",
|
||||
" model_name = \"sklearn_regression_model.pkl\",\n",
|
||||
" tags = {'area': \"diabetes\", 'type': \"regression\"},\n",
|
||||
" description = \"Ridge regression model to predict diabetes\",\n",
|
||||
" workspace = ws)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Manage your dependencies in a folder"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"source_directory = \"C:/abc\"\n",
|
||||
"\n",
|
||||
"os.makedirs(source_directory, exist_ok = True)\n",
|
||||
"os.makedirs(\"C:/abc/x/y\", exist_ok = True)\n",
|
||||
"os.makedirs(\"C:/abc/env\", exist_ok = True)\n",
|
||||
"os.makedirs(\"C:/abc/dockerstep\", exist_ok = True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Show `score.py`. Note that the `sklearn_regression_model.pkl` in the `get_model_path` call is referring to a model named `sklearn_regression_model.pkl` registered under the workspace. It is NOT referencing the local file."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%writefile C:/abc/x/y/score.py\n",
|
||||
"import pickle\n",
|
||||
"import json\n",
|
||||
"import numpy as np\n",
|
||||
"from sklearn.externals import joblib\n",
|
||||
"from sklearn.linear_model import Ridge\n",
|
||||
"from azureml.core.model import Model\n",
|
||||
"\n",
|
||||
"from inference_schema.schema_decorators import input_schema, output_schema\n",
|
||||
"from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType\n",
|
||||
"\n",
|
||||
"def init():\n",
|
||||
" global model\n",
|
||||
" # note here \"sklearn_regression_model.pkl\" is the name of the model registered under\n",
|
||||
" # this is a different behavior than before when the code is run locally, even though the code is the same.\n",
|
||||
" model_path = Model.get_model_path('sklearn_regression_model.pkl')\n",
|
||||
" # deserialize the model file back into a sklearn model\n",
|
||||
" model = joblib.load(model_path)\n",
|
||||
" global name\n",
|
||||
" # note here, entire source directory on inference config gets added into image\n",
|
||||
" # bellow is the example how you can use any extra files in image\n",
|
||||
" with open('./abc/extradata.json') as json_file: \n",
|
||||
" data = json.load(json_file)\n",
|
||||
" name = data[\"people\"][0][\"name\"]\n",
|
||||
"\n",
|
||||
"input_sample = np.array([[10,9,8,7,6,5,4,3,2,1]])\n",
|
||||
"output_sample = np.array([3726.995])\n",
|
||||
"\n",
|
||||
"@input_schema('data', NumpyParameterType(input_sample))\n",
|
||||
"@output_schema(NumpyParameterType(output_sample))\n",
|
||||
"def run(data):\n",
|
||||
" try:\n",
|
||||
" result = model.predict(data)\n",
|
||||
" # you can return any datatype as long as it is JSON-serializable\n",
|
||||
" return \"Hello \" + name + \" here is your result = \" + str(result)\n",
|
||||
" except Exception as e:\n",
|
||||
" error = str(e)\n",
|
||||
" return error"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%writefile C:/abc/env/myenv.yml\n",
|
||||
"name: project_environment\n",
|
||||
"dependencies:\n",
|
||||
" - python=3.6.2\n",
|
||||
" - pip:\n",
|
||||
" - azureml-defaults\n",
|
||||
" - scikit-learn\n",
|
||||
" - numpy\n",
|
||||
" - inference-schema[numpy-support]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%writefile C:/abc/dockerstep/customDockerStep.txt\n",
|
||||
"RUN echo \"this is test\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%writefile C:/abc/extradata.json\n",
|
||||
"{\n",
|
||||
" \"people\": [\n",
|
||||
" {\n",
|
||||
" \"website\": \"microsoft.com\", \n",
|
||||
" \"from\": \"Seattle\", \n",
|
||||
" \"name\": \"Mrudula\"\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create Inference Configuration\n",
|
||||
"\n",
|
||||
" - source_directory = holds source path as string, this entire folder gets added in image so its really easy to access any files within this folder or subfolder\n",
|
||||
" - runtime = Which runtime to use for the image. Current supported runtimes are 'spark-py' and 'python\n",
|
||||
" - entry_script = contains logic specific to initializing your model and running predictions\n",
|
||||
" - conda_file = manages conda and python package dependencies.\n",
|
||||
" - extra_docker_file_steps = optional: any extra steps you want to inject into docker file"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.model import InferenceConfig\n",
|
||||
"\n",
|
||||
"inference_config = InferenceConfig(source_directory=\"C:/abc\",\n",
|
||||
" runtime= \"python\", \n",
|
||||
" entry_script=\"x/y/score.py\",\n",
|
||||
" conda_file=\"env/myenv.yml\", \n",
|
||||
" extra_docker_file_steps=\"dockerstep/customDockerStep.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Deploy Model as a Local Docker Web Service\n",
|
||||
"\n",
|
||||
"*Make sure you have Docker installed and running.*\n",
|
||||
"\n",
|
||||
"Note that the service creation can take few minutes.\n",
|
||||
"\n",
|
||||
"NOTE:\n",
|
||||
"\n",
|
||||
"we require docker running with linux container. If you are running Docker for Windows, you need to ensure the Linux Engine is running\n",
|
||||
"\n",
|
||||
" powershell command to switch to linux engine\n",
|
||||
" & 'C:\\Program Files\\Docker\\Docker\\DockerCli.exe' -SwitchLinuxEngine\n",
|
||||
"\n",
|
||||
"and c drive is shared https://docs.docker.com/docker-for-windows/#shared-drives\n",
|
||||
"sometimes you have to reshare c drive as docker \n",
|
||||
"\n",
|
||||
"<img src=\"./dockerSharedDrive.JPG\" align=\"left\"/>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"deploy service",
|
||||
"aci"
|
||||
]
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.webservice import LocalWebservice\n",
|
||||
"\n",
|
||||
"#this is optional, if not provided we choose random port\n",
|
||||
"deployment_config = LocalWebservice.deploy_configuration(port=6789)\n",
|
||||
"\n",
|
||||
"local_service = Model.deploy(ws, \"test\", [model], inference_config, deployment_config)\n",
|
||||
"\n",
|
||||
"local_service.wait_for_deployment()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print('Local service port: {}'.format(local_service.port))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Check Status and Get Container Logs\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(local_service.get_logs())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Test Web Service"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Call the web service with some input data to get a prediction."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"sample_input = json.dumps({\n",
|
||||
" 'data': [\n",
|
||||
" [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n",
|
||||
" [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]\n",
|
||||
" ]\n",
|
||||
"})\n",
|
||||
"\n",
|
||||
"sample_input = bytes(sample_input, encoding='utf-8')\n",
|
||||
"\n",
|
||||
"print(local_service.run(input_data=sample_input))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Reload Service\n",
|
||||
"\n",
|
||||
"You can update your score.py file and then call `reload()` to quickly restart the service. This will only reload your execution script and dependency files, it will not rebuild the underlying Docker image. As a result, `reload()` is fast, but if you do need to rebuild the image -- to add a new Conda or pip package, for instance -- you will have to call `update()`, instead (see below)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%writefile C:/abc/x/y/score.py\n",
|
||||
"import pickle\n",
|
||||
"import json\n",
|
||||
"import numpy as np\n",
|
||||
"from sklearn.externals import joblib\n",
|
||||
"from sklearn.linear_model import Ridge\n",
|
||||
"from azureml.core.model import Model\n",
|
||||
"\n",
|
||||
"from inference_schema.schema_decorators import input_schema, output_schema\n",
|
||||
"from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType\n",
|
||||
"\n",
|
||||
"def init():\n",
|
||||
" global model\n",
|
||||
" # note here \"sklearn_regression_model.pkl\" is the name of the model registered under\n",
|
||||
" # this is a different behavior than before when the code is run locally, even though the code is the same.\n",
|
||||
" model_path = Model.get_model_path('sklearn_regression_model.pkl')\n",
|
||||
" # deserialize the model file back into a sklearn model\n",
|
||||
" model = joblib.load(model_path)\n",
|
||||
" global name, from_location\n",
|
||||
" # note here, entire source directory on inference config gets added into image\n",
|
||||
" # bellow is the example how you can use any extra files in image\n",
|
||||
" with open('./abc/extradata.json') as json_file: \n",
|
||||
" data = json.load(json_file)\n",
|
||||
" name = data[\"people\"][0][\"name\"]\n",
|
||||
" from_location = data[\"people\"][0][\"from\"]\n",
|
||||
"\n",
|
||||
"input_sample = np.array([[10,9,8,7,6,5,4,3,2,1]])\n",
|
||||
"output_sample = np.array([3726.995])\n",
|
||||
"\n",
|
||||
"@input_schema('data', NumpyParameterType(input_sample))\n",
|
||||
"@output_schema(NumpyParameterType(output_sample))\n",
|
||||
"def run(data):\n",
|
||||
" try:\n",
|
||||
" result = model.predict(data)\n",
|
||||
" # you can return any datatype as long as it is JSON-serializable\n",
|
||||
" return \"Hello \" + name + \" from \" + from_location + \" here is your result = \" + str(result)\n",
|
||||
" except Exception as e:\n",
|
||||
" error = str(e)\n",
|
||||
" return error"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"local_service.reload()\n",
|
||||
"print(\"--------------------------------------------------------------\")\n",
|
||||
"\n",
|
||||
"# after reload now if you call run this will return updated return message\n",
|
||||
"\n",
|
||||
"print(local_service.run(input_data=sample_input))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Update Service\n",
|
||||
"\n",
|
||||
"If you want to change your model(s), Conda dependencies, or deployment configuration, call `update()` to rebuild the Docker image.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"\n",
|
||||
"local_service.update(models = [SomeOtherModelObject],\n",
|
||||
" deployment_config = local_config,\n",
|
||||
" inference_config = inference_config)\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Delete Service"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"local_service.delete()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "raymondl"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
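Besides `local_service.run(...)`, the local container also exposes a plain HTTP scoring endpoint, so you can sanity-check it with any client. A small sketch, assuming the service from the notebook above is still running and exposes `scoring_uri`:

```python
# Sketch: call the local scoring endpoint over HTTP; assumes local_service
# from the notebook above is still running.
import json
import requests

payload = json.dumps({"data": [[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]]})
headers = {"Content-Type": "application/json"}

resp = requests.post(local_service.scoring_uri, data=payload, headers=headers)
print(resp.status_code, resp.text)
```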
@@ -0,0 +1,349 @@
{
 "cells": [
  {"cell_type": "markdown", "metadata": {}, "source": [
   "Copyright (c) Microsoft Corporation. All rights reserved.\n",
   "\n",
   "Licensed under the MIT License."
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   ""
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "# Register model and deploy locally\n",
   "\n",
   "This example shows how to deploy a web service in step-by-step fashion:\n",
   "\n",
   " 1. Register model\n",
   " 2. Deploy the image as a web service in a local Docker container.\n",
   " 3. Quickly test changes to your entry script by reloading the local service.\n",
   " 4. Optionally, make changes to the model, conda file, or extra_docker_file_steps and update the local service."
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "## Prerequisites\n",
   "If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration](../../../configuration.ipynb) Notebook first if you haven't."
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "# Check core SDK version number\n",
   "import azureml.core\n",
   "\n",
   "print(\"SDK version:\", azureml.core.VERSION)"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "## Initialize Workspace\n",
   "\n",
   "Initialize a workspace object from persisted configuration."
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "from azureml.core import Workspace\n",
   "\n",
   "ws = Workspace.from_config()\n",
   "print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "## Register Model"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "You can add tags and descriptions to your models. We are using the `sklearn_regression_model.pkl` file in the current directory as a model with the same name `sklearn_regression_model.pkl` in the workspace.\n",
   "\n",
   "Using tags, you can track useful information such as the name and version of the machine learning library used to train the model, framework, category, target customer, etc. Note that tags must be alphanumeric."
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {"tags": ["register model from file"]}, "outputs": [], "source": [
   "from azureml.core.model import Model\n",
   "\n",
   "model = Model.register(model_path = \"sklearn_regression_model.pkl\",\n",
   "                       model_name = \"sklearn_regression_model.pkl\",\n",
   "                       tags = {'area': \"diabetes\", 'type': \"regression\"},\n",
   "                       description = \"Ridge regression model to predict diabetes\",\n",
   "                       workspace = ws)"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "## Create Inference Configuration"
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "from azureml.core.model import InferenceConfig\n",
   "\n",
   "inference_config = InferenceConfig(runtime=\"python\",\n",
   "                                   entry_script=\"score.py\",\n",
   "                                   conda_file=\"myenv.yml\")"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "## Deploy Model as a Local Docker Web Service\n",
   "\n",
   "*Make sure you have Docker installed and running.*\n",
   "\n",
   "Note that the service creation can take a few minutes.\n",
   "\n",
   "NOTE:\n",
   "\n",
   "We require Docker to be running with Linux containers. If you are running Docker for Windows, you need to ensure the Linux Engine is running:\n",
   "\n",
   "    # PowerShell command to switch to the Linux engine\n",
   "    & 'C:\\Program Files\\Docker\\Docker\\DockerCli.exe' -SwitchLinuxEngine\n",
   "\n",
   "and that the C drive is shared (https://docs.docker.com/docker-for-windows/#shared-drives).\n",
   "Sometimes you have to re-share the C drive in the Docker settings:\n",
   "\n",
   "<img src=\"./dockerSharedDrive.JPG\" align=\"left\"/>"
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "from azureml.core.webservice import LocalWebservice\n",
   "\n",
   "# this is optional; if not provided, a random port is chosen\n",
   "deployment_config = LocalWebservice.deploy_configuration(port=6789)\n",
   "\n",
   "local_service = Model.deploy(ws, \"test\", [model], inference_config, deployment_config)\n",
   "\n",
   "local_service.wait_for_deployment()"
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "print('Local service port: {}'.format(local_service.port))"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "## Check Status and Get Container Logs\n"
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "print(local_service.get_logs())"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "## Test Web Service"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "Call the web service with some input data to get a prediction."
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "import json\n",
   "\n",
   "sample_input = json.dumps({\n",
   "    'data': [\n",
   "        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n",
   "        [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]\n",
   "    ]\n",
   "})\n",
   "\n",
   "sample_input = bytes(sample_input, encoding='utf-8')\n",
   "\n",
   "print(local_service.run(input_data=sample_input))"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "## Reload Service\n",
   "\n",
   "You can update your score.py file and then call `reload()` to quickly restart the service. This will only reload your execution script and dependency files; it will not rebuild the underlying Docker image. As a result, `reload()` is fast, but if you do need to rebuild the image -- to add a new Conda or pip package, for instance -- you will have to call `update()` instead (see below)."
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "%%writefile score.py\n",
   "import pickle\n",
   "import json\n",
   "import numpy as np\n",
   "from sklearn.externals import joblib\n",
   "from sklearn.linear_model import Ridge\n",
   "from azureml.core.model import Model\n",
   "\n",
   "from inference_schema.schema_decorators import input_schema, output_schema\n",
   "from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType\n",
   "\n",
   "def init():\n",
   "    global model\n",
   "    # note: \"sklearn_regression_model.pkl\" here is the name of the model registered under the workspace.\n",
   "    # this is a different behavior than when the code is run locally, even though the code is the same.\n",
   "    model_path = Model.get_model_path('sklearn_regression_model.pkl')\n",
   "    # deserialize the model file back into a sklearn model\n",
   "    model = joblib.load(model_path)\n",
   "\n",
   "input_sample = np.array([[10,9,8,7,6,5,4,3,2,1]])\n",
   "output_sample = np.array([3726.995])\n",
   "\n",
   "@input_schema('data', NumpyParameterType(input_sample))\n",
   "@output_schema(NumpyParameterType(output_sample))\n",
   "def run(data):\n",
   "    try:\n",
   "        result = model.predict(data)\n",
   "        # you can return any datatype as long as it is JSON-serializable\n",
   "        return 'hello from updated score.py'\n",
   "    except Exception as e:\n",
   "        error = str(e)\n",
   "        return error"
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "local_service.reload()\n",
   "print(\"--------------------------------------------------------------\")\n",
   "\n",
   "# after the reload, calling run returns the updated message\n",
   "\n",
   "print(local_service.run(input_data=sample_input))"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "## Update Service\n",
   "\n",
   "If you want to change your model(s), Conda dependencies, or deployment configuration, call `update()` to rebuild the Docker image.\n",
   "\n",
   "```python\n",
   "local_service.update(models = [SomeOtherModelObject],\n",
   "                     deployment_config = local_config,\n",
   "                     inference_config = inference_config)\n",
   "```"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "## Delete Service"
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "local_service.delete()"
  ]}
 ],
 "metadata": {
  "authors": [{"name": "raymondl"}],
  "kernelspec": {"display_name": "Python 3.6", "language": "python", "name": "python36"},
  "language_info": {
   "codemirror_mode": {"name": "ipython", "version": 3},
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
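When iterating on these notebooks, you often re-register the same model name; a quick sketch for checking which registered version you will pick up (standard `Model` API, names as in the notebooks above):

```python
# Sketch: list registered versions of the model used in the notebooks above.
from azureml.core.model import Model

for m in Model.list(ws, name="sklearn_regression_model.pkl"):
    print(m.name, m.version, m.tags)

latest = Model(ws, name="sklearn_regression_model.pkl")  # latest version by default
print("latest:", latest.version)
```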
34 how-to-use-azureml/deploy-to-local/score.py Normal file
@@ -0,0 +1,34 @@
import pickle
import json
import numpy as np
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model

from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType


def init():
    global model
    # note: "sklearn_regression_model.pkl" here is the name of the model registered under the workspace.
    # this is a different behavior than when the code is run locally, even though the code is the same.
    model_path = Model.get_model_path('sklearn_regression_model.pkl')
    # deserialize the model file back into a sklearn model
    model = joblib.load(model_path)


input_sample = np.array([[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]])
output_sample = np.array([3726.995])


@input_schema('data', NumpyParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data):
    try:
        result = model.predict(data)
        # you can return any datatype as long as it is JSON-serializable
        return result.tolist()
    except Exception as e:
        error = str(e)
        return error
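Before deploying, you can smoke-test the scoring logic itself without any service. A rough sketch that bypasses `Model.get_model_path` by loading the local pickle directly (assumes `sklearn_regression_model.pkl` is in the working directory):

```python
# Rough local smoke test for the score.py logic; bypasses the model registry
# by loading the local pickle directly.
import numpy as np
from sklearn.externals import joblib

model = joblib.load("sklearn_regression_model.pkl")

sample = np.array([[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]])
print(model.predict(sample).tolist())  # same output shape run() would return
```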
BIN how-to-use-azureml/deploy-to-local/sklearn_regression_model.pkl Normal file
Binary file not shown.
89 how-to-use-azureml/deployment/accelerated-models/README.md Normal file
@@ -0,0 +1,89 @@
# Notebooks for Microsoft Azure Machine Learning Hardware Accelerated Models SDK

Easily create and train a model using various deep neural networks (DNNs) as a featurizer for deployment to Azure or a Data Box Edge device for ultra-low-latency inferencing using FPGAs. These models are currently available:

* ResNet 50
* ResNet 152
* DenseNet-121
* VGG-16
* SSD-VGG

To learn more about the azureml-accel-model classes, see the section [Model Classes](#model-classes) below or the [Azure ML Accel Models SDK documentation](https://docs.microsoft.com/en-us/python/api/azureml-accel-models/azureml.accel?view=azure-ml-py).

### Step 1: Create an Azure ML workspace
Follow [these instructions](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python) to install the Azure ML SDK on your local machine, create an Azure ML workspace, and set up your notebook environment, which is required for the next step.

### Step 2: Install the Azure ML Accelerated Models SDK
Once you have set up your environment, install the Azure ML Accel Models SDK. This package requires tensorflow >= 1.6,<2.0 to be installed.

If you already have tensorflow >= 1.6,<2.0 installed in your development environment, you can install the SDK package using:

```
pip install azureml-accel-models
```

If you do not have tensorflow >= 1.6,<2.0 and are using a CPU-only development environment, our SDK with tensorflow can be installed using:

```
pip install azureml-accel-models[cpu]
```

If your machine supports GPU (for example, on an [Azure DSVM](https://docs.microsoft.com/en-us/azure/machine-learning/data-science-virtual-machine/overview)), then you can leverage the tensorflow-gpu functionality using:

```
pip install azureml-accel-models[gpu]
```

### Step 3: Follow our notebooks

The notebooks in this repo walk through the following scenarios:
* [Quickstart](accelerated-models-quickstart.ipynb): deploy and inference a ResNet50 model trained on ImageNet
* [Object Detection](accelerated-models-object-detection.ipynb): deploy and inference an SSD-VGG model that can do object detection
* [Training models](accelerated-models-training.ipynb): train one of our accelerated models on the Kaggle Cats and Dogs dataset to see how to improve accuracy on custom datasets

<a name="model-classes"></a>
## Model Classes
As stated above, we support five accelerated models. Here is more information on their input and output tensors.

**Available models and output tensors**

The available models and the corresponding default classifier output tensors are below. This is the value that you would use during inferencing if you used the default classifier.
* Resnet50, QuantizedResnet50
```
output_tensors = "classifier_1/resnet_v1_50/predictions/Softmax:0"
```
* Resnet152, QuantizedResnet152
```
output_tensors = "classifier/resnet_v1_152/predictions/Softmax:0"
```
* Densenet121, QuantizedDensenet121
```
output_tensors = "classifier/densenet121/predictions/Softmax:0"
```
* Vgg16, QuantizedVgg16
```
output_tensors = "classifier/vgg_16/fc8/squeezed:0"
```
* SsdVgg, QuantizedSsdVgg
```
output_tensors = ['ssd_300_vgg/block4_box/Reshape_1:0', 'ssd_300_vgg/block7_box/Reshape_1:0', 'ssd_300_vgg/block8_box/Reshape_1:0', 'ssd_300_vgg/block9_box/Reshape_1:0', 'ssd_300_vgg/block10_box/Reshape_1:0', 'ssd_300_vgg/block11_box/Reshape_1:0', 'ssd_300_vgg/block4_box/Reshape:0', 'ssd_300_vgg/block7_box/Reshape:0', 'ssd_300_vgg/block8_box/Reshape:0', 'ssd_300_vgg/block9_box/Reshape:0', 'ssd_300_vgg/block10_box/Reshape:0', 'ssd_300_vgg/block11_box/Reshape:0']
```

For more information, please reference the azureml.accel.models package in the [Azure ML Python SDK documentation](https://docs.microsoft.com/en-us/python/api/azureml-accel-models/azureml.accel.models?view=azure-ml-py).

**Input tensors**

The input_tensors value defaults to "Placeholder:0" and is created in the [Image Preprocessing](#construct-model) step in the line:
```
in_images = tf.placeholder(tf.string)
```

You can change the input_tensors name by doing this:
```
in_images = tf.placeholder(tf.string, name="images")
```

## Resources
* [Read more about FPGAs](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-accelerate-with-fpgas)
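To make the table above concrete, here is a rough sketch of how these tensor names are used when scoring against a deployed ResNet50 service (client setup follows the notebooks below; the address, service name, and `my-image.jpg` are placeholders):

```python
# Sketch: score one image against a deployed ResNet50 service using the
# default classifier's output tensor from the table above.
from azureml.accel.client import PredictionClient

client = PredictionClient(address="<service address>",   # placeholder
                          port=80,
                          use_ssl=False,
                          service_name="<service name>")  # placeholder

result = client.score_file(
    path="my-image.jpg",                                  # placeholder image
    input_name="Placeholder:0",                           # default input tensor
    outputs="classifier_1/resnet_v1_50/predictions/Softmax:0")  # ResNet50 default
```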
@@ -0,0 +1,490 @@
{
 "cells": [
  {"cell_type": "markdown", "metadata": {}, "source": [
   "Copyright (c) Microsoft Corporation. All rights reserved.\n",
   "\n",
   "Licensed under the MIT License."
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "# Azure ML Hardware Accelerated Object Detection"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "This tutorial will show you how to deploy an object detection service based on the SSD-VGG model in just a few minutes using the Azure Machine Learning Accelerated AI service.\n",
   "\n",
   "We will use the SSD-VGG model accelerated on an FPGA. Our Accelerated Models Service handles translating deep neural networks (DNN) into an FPGA program.\n",
   "\n",
   "The steps in this notebook are:\n",
   "1. [Set up Environment](#set-up-environment)\n",
   "* [Construct Model](#construct-model)\n",
   "  * Image Preprocessing\n",
   "  * Featurizer\n",
   "  * Save Model\n",
   "  * Save input and output tensor names\n",
   "* [Create Image](#create-image)\n",
   "* [Deploy Image](#deploy-image)\n",
   "* [Test the Service](#test-service)\n",
   "  * Create Client\n",
   "  * Serve the model\n",
   "* [Cleanup](#cleanup)"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "<a id=\"set-up-environment\"></a>\n",
   "## 1. Set up Environment\n",
   "### 1.a. Imports"
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "import os\n",
   "import tensorflow as tf"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "### 1.b. Retrieve Workspace\n",
   "If you haven't created a Workspace, please follow [this notebook](../../../configuration.ipynb) to do so. If you have, run the code block below to retrieve it."
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "from azureml.core import Workspace\n",
   "\n",
   "ws = Workspace.from_config()\n",
   "print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "<a id=\"construct-model\"></a>\n",
   "## 2. Construct model\n",
   "### 2.a. Image preprocessing\n",
   "We'd like our service to accept JPEG images as input. However, the input to SSD-VGG is a float tensor of shape \\[1, 300, 300, 3\\]. The first dimension is batch, then height, width, and channels (i.e. NHWC). To bridge this gap, we need code that decodes JPEG images and resizes them appropriately for input to SSD-VGG. The Accelerated AI service can execute TensorFlow graphs as part of the service, and we'll use that ability to do the image preprocessing. This code defines a TensorFlow graph that preprocesses an array of JPEG images (as TensorFlow strings) and produces a tensor that is ready to be featurized by SSD-VGG.\n",
   "\n",
   "**Note:** Expect to see TF deprecation warnings until we port our SDK over to use Tensorflow 2.0."
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "# Input images as a two-dimensional tensor containing an arbitrary number of images represented as strings\n",
   "import azureml.accel.models.utils as utils\n",
   "tf.reset_default_graph()\n",
   "\n",
   "in_images = tf.placeholder(tf.string)\n",
   "image_tensors = utils.preprocess_array(in_images, output_width=300, output_height=300, preserve_aspect_ratio=False)\n",
   "print(image_tensors.shape)"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "### 2.b. Featurizer\n",
   "The SSD-VGG model is different from our other models in that it generates 12 tensor outputs. These correspond to the x, y displacements of the anchor boxes and the detection confidence (for 21 classes). Because these outputs are not convenient to work with, we will later use a pre-defined post-processing utility to transform the outputs into a simplified list of bounding boxes with their respective class and confidence.\n",
   "\n",
   "For more information about the output tensors, take this example: the output tensor 'ssd_300_vgg/block4_box/Reshape_1:0' has a shape of [None, 37, 37, 4, 21]. This gives the pre-softmax confidence for 4 anchor boxes situated at each site of a 37 x 37 grid imposed on the image, one confidence score for each of the 21 classes. The first dimension is the batch dimension. Likewise, 'ssd_300_vgg/block4_box/Reshape:0' has shape [None, 37, 37, 4, 4] and encodes the (cx, cy) center shift and rescaling (sw, sh) relative to each anchor box. Refer to the [SSD-VGG paper](https://arxiv.org/abs/1512.02325) to understand how these are computed. The other 10 tensors are defined similarly."
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "from azureml.accel.models import SsdVgg\n",
   "\n",
   "saved_model_dir = os.path.join(os.path.expanduser('~'), 'models')\n",
   "model_graph = SsdVgg(saved_model_dir, is_frozen = True)\n",
   "\n",
   "print('SSD-VGG Input Tensors:')\n",
   "for idx, input_name in enumerate(model_graph.input_tensor_list):\n",
   "    print('{}, {}'.format(input_name, model_graph.get_input_dims(idx)))\n",
   "\n",
   "print('SSD-VGG Output Tensors:')\n",
   "for idx, output_name in enumerate(model_graph.output_tensor_list):\n",
   "    print('{}, {}'.format(output_name, model_graph.get_output_dims(idx)))\n",
   "\n",
   "ssd_outputs = model_graph.import_graph_def(image_tensors, is_training=False)"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "### 2.c. Save Model\n",
   "Now that we have loaded both parts of the tensorflow graph (the preprocessor and the SSD-VGG featurizer), we can save the graph and associated variables to a directory which we can register as an Azure ML Model."
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "model_name = \"ssdvgg\"\n",
   "model_save_path = os.path.join(saved_model_dir, model_name, \"saved_model\")\n",
   "print(\"Saving model in {}\".format(model_save_path))\n",
   "\n",
   "output_map = {}\n",
   "for i, output in enumerate(ssd_outputs):\n",
   "    output_map['out_{}'.format(i)] = output\n",
   "\n",
   "with tf.Session() as sess:\n",
   "    model_graph.restore_weights(sess)\n",
   "    tf.saved_model.simple_save(sess,\n",
   "                               model_save_path,\n",
   "                               inputs={'images': in_images},\n",
   "                               outputs=output_map)"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "### 2.d. Important! Save names of input and output tensors\n",
   "\n",
   "These input and output tensors that were created during the preprocessing and classifier steps are also going to be used when **converting the model** to an Accelerated Model that can run on FPGAs and for **making an inferencing request**. It is very important to save this information!"
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {"tags": ["register model from file"]}, "outputs": [], "source": [
   "input_tensors = in_images.name\n",
   "# We will use the list of output tensors during inferencing\n",
   "output_tensors = [output.name for output in ssd_outputs]\n",
   "# However, for multiple output tensors, our AccelOnnxConverter will\n",
   "# accept comma-delimited strings (lists will cause an error)\n",
   "output_tensors_str = \",\".join(output_tensors)\n",
   "\n",
   "print(input_tensors)\n",
   "print(output_tensors)"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "<a id=\"create-image\"></a>\n",
   "## 3. Create AccelContainerImage\n",
   "Below we will execute all the same steps as in the [Quickstart](./accelerated-models-quickstart.ipynb#create-image) to package the model we have saved locally into an accelerated Docker image saved in our workspace. Completing all the steps may take a few minutes. For more details on each step, check out the [Quickstart section on model registration](./accelerated-models-quickstart.ipynb#register-model)."
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "from azureml.core import Workspace\n",
   "from azureml.core.model import Model\n",
   "from azureml.core.image import Image\n",
   "from azureml.accel import AccelOnnxConverter\n",
   "from azureml.accel import AccelContainerImage\n",
   "\n",
   "# Retrieve workspace\n",
   "ws = Workspace.from_config()\n",
   "print(\"Successfully retrieved workspace:\", ws.name, ws.resource_group, ws.location, ws.subscription_id, '\\n')\n",
   "\n",
   "# Register model\n",
   "registered_model = Model.register(workspace = ws,\n",
   "                                  model_path = model_save_path,\n",
   "                                  model_name = model_name)\n",
   "print(\"Successfully registered: \", registered_model.name, registered_model.description, registered_model.version, '\\n', sep = '\\t')\n",
   "\n",
   "# Convert model\n",
   "convert_request = AccelOnnxConverter.convert_tf_model(ws, registered_model, input_tensors, output_tensors_str)\n",
   "# If it fails, you can run wait_for_completion again with show_output=True.\n",
   "convert_request.wait_for_completion(show_output=False)\n",
   "converted_model = convert_request.result\n",
   "print(\"\\nSuccessfully converted: \", converted_model.name, converted_model.url, converted_model.version,\n",
   "      converted_model.id, converted_model.created_time, '\\n')\n",
   "\n",
   "# Package into AccelContainerImage\n",
   "image_config = AccelContainerImage.image_configuration()\n",
   "# Image name must be lowercase\n",
   "image_name = \"{}-image\".format(model_name)\n",
   "image = Image.create(name = image_name,\n",
   "                     models = [converted_model],\n",
   "                     image_config = image_config,\n",
   "                     workspace = ws)\n",
   "image.wait_for_creation()\n",
   "print(\"Created AccelContainerImage: {} {} {}\\n\".format(image.name, image.creation_state, image.image_location))"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "<a id=\"deploy-image\"></a>\n",
   "## 4. Deploy image\n",
   "Once you have an Azure ML Accelerated Image in your Workspace, you can deploy it to two destinations: a Databox Edge machine or an AKS cluster.\n",
   "\n",
   "### 4.a. Deploy to Databox Edge Machine using IoT Hub\n",
   "See the sample [here](https://github.com/Azure-Samples/aml-real-time-ai/) for using the Azure IoT CLI extension for deploying your Docker image to your Databox Edge Machine.\n",
   "\n",
   "### 4.b. Deploy to AKS Cluster\n",
   "As in the [Quickstart section on image deployment](./accelerated-models-quickstart.ipynb#deploy-image), we are going to create an AKS cluster with FPGA-enabled machines, then deploy our service to it.\n",
   "#### Create AKS ComputeTarget"
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "from azureml.core.compute import AksCompute, ComputeTarget\n",
   "\n",
   "# Uses the specific FPGA-enabled VM (sku: Standard_PB6s)\n",
   "# Authentication is enabled by default, but for testing we specify False\n",
   "prov_config = AksCompute.provisioning_configuration(vm_size = \"Standard_PB6s\",\n",
   "                                                    agent_count = 1)\n",
   "\n",
   "aks_name = 'my-aks-pb6-ssd-vgg'\n",
   "# Create the cluster\n",
   "aks_target = ComputeTarget.create(workspace = ws,\n",
   "                                  name = aks_name,\n",
   "                                  provisioning_configuration = prov_config)"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "Provisioning an AKS cluster might take a while (15 or so minutes), and we want to wait until it's successfully provisioned before we can deploy a service to it. If you interrupt this cell, provisioning of the cluster will continue. You can re-run it or check the status in your Workspace under Compute."
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "aks_target.wait_for_completion(show_output = True)\n",
   "print(aks_target.provisioning_state)\n",
   "print(aks_target.provisioning_errors)"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "#### Deploy AccelContainerImage to AKS ComputeTarget"
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "from azureml.core.webservice import Webservice, AksWebservice\n",
   "\n",
   "# Set the web service configuration (for creating a test service, we don't want autoscale enabled)\n",
   "aks_config = AksWebservice.deploy_configuration(autoscale_enabled=False,\n",
   "                                                num_replicas=1,\n",
   "                                                auth_enabled = False)\n",
   "\n",
   "aks_service_name = 'my-aks-service'\n",
   "\n",
   "aks_service = Webservice.deploy_from_image(workspace = ws,\n",
   "                                           name = aks_service_name,\n",
   "                                           image = image,\n",
   "                                           deployment_config = aks_config,\n",
   "                                           deployment_target = aks_target)\n",
   "aks_service.wait_for_deployment(show_output = True)"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "<a id=\"test-service\"></a>\n",
   "## 5. Test the service\n",
   "<a id=\"create-client\"></a>\n",
   "### 5.a. Create Client\n",
   "The image supports gRPC and the TensorFlow Serving \"predict\" API. We have a client that can call into the docker image to get predictions.\n",
   "\n",
   "**Note:** If you chose to use auth_enabled=True when creating your AksWebservice.deploy_configuration(), see documentation [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice(class)?view=azure-ml-py#get-keys--) on how to retrieve your keys and use either key as an argument to PredictionClient(..., access_token=key)."
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "# Using the grpc client in the AzureML Accelerated Models SDK\n",
   "from azureml.accel.client import PredictionClient\n",
   "\n",
   "address = aks_service.scoring_uri\n",
   "ssl_enabled = address.startswith(\"https\")\n",
   "address = address[address.find('/')+2:].strip('/')\n",
   "port = 443 if ssl_enabled else 80\n",
   "\n",
   "# Initialize the AzureML Accelerated Models client\n",
   "client = PredictionClient(address=address,\n",
   "                          port=port,\n",
   "                          use_ssl=ssl_enabled,\n",
   "                          service_name=aks_service.name)"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "You can adapt the client [code](https://github.com/Azure/aml-real-time-ai/blob/master/pythonlib/amlrealtimeai/client.py) to meet your needs. There is also an example C# [client](https://github.com/Azure/aml-real-time-ai/blob/master/sample-clients/csharp).\n",
   "\n",
   "The service provides an API that is compatible with TensorFlow Serving. There are instructions to download a sample client [here](https://www.tensorflow.org/serving/setup)."
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "<a id=\"serve-model\"></a>\n",
   "### 5.b. Serve the model\n",
   "The SSD-VGG model returns the confidence and bounding boxes for all possible anchor boxes. As mentioned earlier, we will use a post-processing routine to transform this into a list of bounding boxes (y1, x1, y2, x2), where x, y are fractional coordinates measured from the left and top respectively. A corresponding list of classes and scores is also returned to tag each bounding box. Below we make use of this information to draw the bounding boxes on top of the original image. Note that in the post-processing routine we select a confidence threshold of 0.5."
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "import cv2\n",
   "from matplotlib import pyplot as plt\n",
   "\n",
   "colors_tableau = [(255, 255, 255), (31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),\n",
   "                  (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),\n",
   "                  (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),\n",
   "                  (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),\n",
   "                  (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]\n",
   "\n",
   "\n",
   "def draw_boxes_on_img(img, classes, scores, bboxes, thickness=2):\n",
   "    shape = img.shape\n",
   "    for i in range(bboxes.shape[0]):\n",
   "        bbox = bboxes[i]\n",
   "        color = colors_tableau[classes[i]]\n",
   "        # Draw the bounding box...\n",
   "        p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))\n",
   "        p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))\n",
   "        cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)\n",
   "        # Draw the label text...\n",
   "        s = '%s/%.3f' % (classes[i], scores[i])\n",
   "        p1 = (p1[0]-5, p1[1])\n",
   "        cv2.putText(img, s, p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.4, color, 1)"
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "import azureml.accel._external.ssdvgg_utils as ssdvgg_utils\n",
   "\n",
   "result = client.score_file(path=\"meeting.jpg\", input_name=input_tensors, outputs=output_tensors)\n",
   "classes, scores, bboxes = ssdvgg_utils.postprocess(result, select_threshold=0.5)\n",
   "\n",
   "img = cv2.imread('meeting.jpg', 1)\n",
   "img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n",
   "draw_boxes_on_img(img, classes, scores, bboxes)\n",
   "plt.imshow(img)"
  ]},
  {"cell_type": "markdown", "metadata": {}, "source": [
   "<a id=\"cleanup\"></a>\n",
   "## 6. Cleanup\n",
   "It's important to clean up your resources, so that you won't incur unnecessary costs. In the [next notebook](./accelerated-models-training.ipynb) you will learn how to train a classifier on a new dataset using transfer learning."
  ]},
  {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "aks_service.delete()\n",
   "aks_target.delete()\n",
   "image.delete()\n",
   "registered_model.delete()\n",
   "converted_model.delete()"
  ]}
 ],
 "metadata": {
  "authors": [{"name": "coverste"}, {"name": "paledger"}, {"name": "sukha"}],
  "kernelspec": {"display_name": "Python 3.6", "language": "python", "name": "python36"},
  "language_info": {
   "codemirror_mode": {"name": "ipython", "version": 3},
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
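The `postprocess` call in the notebook above returns parallel arrays, which makes simple filtering and reporting straightforward. A small usage sketch (names as in that notebook; `bboxes` rows are fractional (y1, x1, y2, x2)):

```python
# Sketch: report detections returned by ssdvgg_utils.postprocess.
# classes/scores/bboxes are parallel arrays produced above.
for cls, score, (y1, x1, y2, x2) in zip(classes, scores, bboxes):
    print("class {:2d}  score {:.3f}  box=({:.2f}, {:.2f})-({:.2f}, {:.2f})".format(
        cls, score, x1, y1, x2, y2))
```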
@@ -0,0 +1,544 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Azure ML Hardware Accelerated Models Quickstart"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This tutorial will show you how to deploy an image recognition service based on the ResNet 50 classifier using the Azure Machine Learning Accelerated Models service. Get more information about our service from our [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-accelerate-with-fpgas), [API reference](https://docs.microsoft.com/en-us/python/api/azureml-accel-models/azureml.accel?view=azure-ml-py), or [forum](https://aka.ms/aml-forum).\n",
|
||||
"\n",
|
||||
"We will use an accelerated ResNet50 featurizer running on an FPGA. Our Accelerated Models Service handles translating deep neural networks (DNN) into an FPGA program.\n",
|
||||
"\n",
|
||||
"For more information about using other models besides Resnet50, see the [README](./README.md).\n",
|
||||
"\n",
|
||||
"The steps covered in this notebook are: \n",
|
||||
"1. [Set up environment](#set-up-environment)\n",
|
||||
"* [Construct model](#construct-model)\n",
|
||||
" * Image Preprocessing\n",
|
||||
" * Featurizer (Resnet50)\n",
|
||||
" * Classifier\n",
|
||||
" * Save Model\n",
|
||||
"* [Register Model](#register-model)\n",
|
||||
"* [Convert into Accelerated Model](#convert-model)\n",
|
||||
"* [Create Image](#create-image)\n",
|
||||
"* [Deploy](#deploy-image)\n",
|
||||
"* [Test service](#test-service)\n",
|
||||
"* [Clean-up](#clean-up)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"set-up-environment\"></a>\n",
|
||||
"## 1. Set up environment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import tensorflow as tf"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retrieve Workspace\n",
|
||||
"If you haven't created a Workspace, please follow [this notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) to do so. If you have, run the codeblock below to retrieve it. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Workspace\n",
|
||||
"\n",
|
||||
"ws = Workspace.from_config()\n",
|
||||
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"construct-model\"></a>\n",
|
||||
"## 2. Construct model\n",
|
||||
"\n",
|
||||
"There are three parts to the model we are deploying: pre-processing, featurizer with ResNet50, and classifier with ImageNet dataset. Then we will save this complete Tensorflow model graph locally before registering it to your Azure ML Workspace.\n",
|
||||
"\n",
|
||||
"### 2.a. Image preprocessing\n",
|
||||
"We'd like our service to accept JPEG images as input. However the input to ResNet50 is a tensor. So we need code that decodes JPEG images and does the preprocessing required by ResNet50. The Accelerated AI service can execute TensorFlow graphs as part of the service and we'll use that ability to do the image preprocessing. This code defines a TensorFlow graph that preprocesses an array of JPEG images (as strings) and produces a tensor that is ready to be featurized by ResNet50.\n",
|
||||
"\n",
|
||||
"**Note:** Expect to see TF deprecation warnings until we port our SDK over to use Tensorflow 2.0."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Input images as a two-dimensional tensor containing an arbitrary number of images represented a strings\n",
|
||||
"import azureml.accel.models.utils as utils\n",
|
||||
"tf.reset_default_graph()\n",
|
||||
"\n",
|
||||
"in_images = tf.placeholder(tf.string)\n",
|
||||
"image_tensors = utils.preprocess_array(in_images)\n",
|
||||
"print(image_tensors.shape)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2.b. Featurizer\n",
|
||||
"We use ResNet50 as a featurizer. In this step we initialize the model. This downloads a TensorFlow checkpoint of the quantized ResNet50."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.accel.models import QuantizedResnet50\n",
|
||||
"save_path = os.path.expanduser('~/models')\n",
|
||||
"model_graph = QuantizedResnet50(save_path, is_frozen = True)\n",
|
||||
"feature_tensor = model_graph.import_graph_def(image_tensors)\n",
|
||||
"print(model_graph.version)\n",
|
||||
"print(feature_tensor.name)\n",
|
||||
"print(feature_tensor.shape)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2.c. Classifier\n",
|
||||
"The model we downloaded includes a classifier which takes the output of the ResNet50 and identifies an image. This classifier is trained on the ImageNet dataset. We are going to use this classifier for our service. The next [notebook](./accelerated-models-training.ipynb) shows how to train a classifier for a different data set. The input to the classifier is a tensor matching the output of our ResNet50 featurizer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"classifier_output = model_graph.get_default_classifier(feature_tensor)\n",
|
||||
"print(classifier_output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2.d. Save Model\n",
|
||||
"Now that we loaded all three parts of the tensorflow graph (preprocessor, resnet50 featurizer, and the classifier), we can save the graph and associated variables to a directory which we can register as an Azure ML Model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# model_name must be lowercase\n",
|
||||
"model_name = \"resnet50\"\n",
|
||||
"model_save_path = os.path.join(save_path, model_name)\n",
|
||||
"print(\"Saving model in {}\".format(model_save_path))\n",
|
||||
"\n",
|
||||
"with tf.Session() as sess:\n",
|
||||
" model_graph.restore_weights(sess)\n",
|
||||
" tf.saved_model.simple_save(sess, model_save_path,\n",
|
||||
" inputs={'images': in_images},\n",
|
||||
" outputs={'output_alias': classifier_output})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2.e. Important! Save names of input and output tensors\n",
|
||||
"\n",
|
||||
"These input and output tensors that were created during the preprocessing and classifier steps are also going to be used when **converting the model** to an Accelerated Model that can run on FPGA's and for **making an inferencing request**. It is very important to save this information! You can see our defaults for all the models in the [README](./README.md).\n",
|
||||
"\n",
|
||||
"By default for Resnet50, these are the values you should see when running the cell below: \n",
|
||||
"* input_tensors = \"Placeholder:0\"\n",
|
||||
"* output_tensors = \"classifier/resnet_v1_50/predictions/Softmax:0\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"register model from file"
|
||||
]
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"input_tensors = in_images.name\n",
|
||||
"output_tensors = classifier_output.name\n",
|
||||
"\n",
|
||||
"print(input_tensors)\n",
|
||||
"print(output_tensors)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"register-model\"></a>\n",
|
||||
"## 3. Register Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can add tags and descriptions to your models. Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"register model from file"
|
||||
]
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.model import Model\n",
|
||||
"\n",
|
||||
"registered_model = Model.register(workspace = ws,\n",
|
||||
" model_path = model_save_path,\n",
|
||||
" model_name = model_name)\n",
|
||||
"\n",
|
||||
"print(\"Successfully registered: \", registered_model.name, registered_model.description, registered_model.version, sep = '\\t')"
|
||||
]
|
||||
},
|
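||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For example, a sketch of registering the same model with tags and a description (the tag keys and values here are illustrative, not required by the service):\n",
|
||||
"```\n",
|
||||
"# Hypothetical tags/description - adjust to your scenario\n",
|
||||
"registered_model = Model.register(workspace = ws,\n",
|
||||
"                                  model_path = model_save_path,\n",
|
||||
"                                  model_name = model_name,\n",
|
||||
"                                  tags = {\"framework\": \"tensorflow\", \"featurizer\": \"resnet50\"},\n",
|
||||
"                                  description = \"ResNet50 featurizer with ImageNet classifier\")\n",
|
||||
"```"
|
||||
]
|
||||
},
|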
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"convert-model\"></a>\n",
|
||||
"## 4. Convert Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For conversion you need to provide names of input and output tensors. This information can be found from the model_graph you saved in step 2.e. above.\n",
|
||||
"\n",
|
||||
"**Note**: Conversion may take a while and on average for FPGA model it is about 1-3 minutes and it depends on model type."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"register model from file"
|
||||
]
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.accel import AccelOnnxConverter\n",
|
||||
"\n",
|
||||
"convert_request = AccelOnnxConverter.convert_tf_model(ws, registered_model, input_tensors, output_tensors)\n",
|
||||
"# If it fails, you can run wait_for_completion again with show_output=True.\n",
|
||||
"convert_request.wait_for_completion(show_output = False)\n",
|
||||
"# If the above call succeeded, get the converted model\n",
|
||||
"converted_model = convert_request.result\n",
|
||||
"print(\"\\nSuccessfully converted: \", converted_model.name, converted_model.url, converted_model.version, \n",
|
||||
" converted_model.id, converted_model.created_time, '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"create-image\"></a>\n",
|
||||
"## 5. Package the model into an Image"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can add tags and descriptions to image. Also, for FPGA model an image can only contain **single** model.\n",
|
||||
"\n",
|
||||
"**Note**: The following command can take few minutes. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.image import Image\n",
|
||||
"from azureml.accel import AccelContainerImage\n",
|
||||
"\n",
|
||||
"image_config = AccelContainerImage.image_configuration()\n",
|
||||
"# Image name must be lowercase\n",
|
||||
"image_name = \"{}-image\".format(model_name)\n",
|
||||
"\n",
|
||||
"image = Image.create(name = image_name,\n",
|
||||
" models = [converted_model],\n",
|
||||
" image_config = image_config, \n",
|
||||
" workspace = ws)\n",
|
||||
"image.wait_for_creation(show_output = False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"deploy-image\"></a>\n",
|
||||
"## 6. Deploy\n",
|
||||
"Once you have an Azure ML Accelerated Image in your Workspace, you can deploy it to two destinations, to a Databox Edge machine or to an AKS cluster. \n",
|
||||
"\n",
|
||||
"### 6.a. Databox Edge Machine using IoT Hub\n",
|
||||
"See the sample [here](https://github.com/Azure-Samples/aml-real-time-ai/) for using the Azure IoT CLI extension for deploying your Docker image to your Databox Edge Machine.\n",
|
||||
"\n",
|
||||
"### 6.b. Azure Kubernetes Service (AKS) using Azure ML Service\n",
|
||||
"We are going to create an AKS cluster with FPGA-enabled machines, then deploy our service to it. For more information, see [AKS official docs](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where#aks).\n",
|
||||
"\n",
|
||||
"#### Create AKS ComputeTarget"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import AksCompute, ComputeTarget\n",
|
||||
"\n",
|
||||
"# Uses the specific FPGA enabled VM (sku: Standard_PB6s)\n",
|
||||
"# Authentication is enabled by default, but for testing we specify False\n",
|
||||
"prov_config = AksCompute.provisioning_configuration(vm_size = \"Standard_PB6s\",\n",
|
||||
" agent_count = 1)\n",
|
||||
"\n",
|
||||
"aks_name = 'my-aks-pb6'\n",
|
||||
"# Create the cluster\n",
|
||||
"aks_target = ComputeTarget.create(workspace = ws, \n",
|
||||
" name = aks_name, \n",
|
||||
" provisioning_configuration = prov_config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Provisioning an AKS cluster might take awhile (15 or so minutes), and we want to wait until it's successfully provisioned before we can deploy a service to it. If you interrupt this cell, provisioning of the cluster will continue. You can also check the status in your Workspace under Compute."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"aks_target.wait_for_completion(show_output = True)\n",
|
||||
"print(aks_target.provisioning_state)\n",
|
||||
"print(aks_target.provisioning_errors)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Deploy AccelContainerImage to AKS ComputeTarget"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.webservice import Webservice, AksWebservice\n",
|
||||
"\n",
|
||||
"#Set the web service configuration (for creating a test service, we don't want autoscale enabled)\n",
|
||||
"aks_config = AksWebservice.deploy_configuration(autoscale_enabled=False,\n",
|
||||
" num_replicas=1,\n",
|
||||
" auth_enabled = False)\n",
|
||||
"\n",
|
||||
"aks_service_name ='my-aks-service'\n",
|
||||
"\n",
|
||||
"aks_service = Webservice.deploy_from_image(workspace = ws,\n",
|
||||
" name = aks_service_name,\n",
|
||||
" image = image,\n",
|
||||
" deployment_config = aks_config,\n",
|
||||
" deployment_target = aks_target)\n",
|
||||
"aks_service.wait_for_deployment(show_output = True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"test-service\"></a>\n",
|
||||
"## 7. Test the service"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 7.a. Create Client\n",
|
||||
"The image supports gRPC and the TensorFlow Serving \"predict\" API. We have a client that can call into the docker image to get predictions.\n",
|
||||
"\n",
|
||||
"**Note:** If you chose to use auth_enabled=True when creating your AksWebservice, see documentation [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice(class)?view=azure-ml-py#get-keys--) on how to retrieve your keys and use either key as an argument to PredictionClient(...,access_token=key)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Using the grpc client in AzureML Accelerated Models SDK\n",
|
||||
"from azureml.accel.client import PredictionClient\n",
|
||||
"\n",
|
||||
"address = aks_service.scoring_uri\n",
|
||||
"ssl_enabled = address.startswith(\"https\")\n",
|
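||||
"# Strip the scheme ('http://' or 'https://') and trailing slash to get the bare host for gRPC\n",
|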
||||
"address = address[address.find('/')+2:].strip('/')\n",
|
||||
"port = 443 if ssl_enabled else 80\n",
|
||||
"\n",
|
||||
"# Initialize AzureML Accelerated Models client\n",
|
||||
"client = PredictionClient(address=address,\n",
|
||||
" port=port,\n",
|
||||
" use_ssl=ssl_enabled,\n",
|
||||
" service_name=aks_service.name)"
|
||||
]
|
||||
},
|
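||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you did deploy with auth enabled, a minimal sketch of authenticating the client (this reuses the connection variables above; `get_keys()` returns the primary and secondary keys):\n",
|
||||
"```\n",
|
||||
"# Retrieve the service keys and pass one as the gRPC access token\n",
|
||||
"primary_key, secondary_key = aks_service.get_keys()\n",
|
||||
"client = PredictionClient(address=address,\n",
|
||||
"                          port=port,\n",
|
||||
"                          use_ssl=ssl_enabled,\n",
|
||||
"                          service_name=aks_service.name,\n",
|
||||
"                          access_token=primary_key)\n",
|
||||
"```"
|
||||
]
|
||||
},
|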
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can adapt the client [code](https://github.com/Azure/aml-real-time-ai/blob/master/pythonlib/amlrealtimeai/client.py) to meet your needs. There is also an example C# [client](https://github.com/Azure/aml-real-time-ai/blob/master/sample-clients/csharp).\n",
|
||||
"\n",
|
||||
"The service provides an API that is compatible with TensorFlow Serving. There are instructions to download a sample client [here](https://www.tensorflow.org/serving/setup)."
|
||||
]
|
||||
},
|
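||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As a rough sketch of what a raw TensorFlow Serving \"predict\" call looks like over gRPC (this assumes the `tensorflow-serving-api` package is installed, and reuses `tf`, `address`, `port`, `input_tensors`, and `aks_service` from above):\n",
|
||||
"```\n",
|
||||
"import grpc\n",
|
||||
"from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc\n",
|
||||
"\n",
|
||||
"# Open a channel and build a PredictionService stub\n",
|
||||
"# (use grpc.secure_channel with credentials if ssl_enabled)\n",
|
||||
"channel = grpc.insecure_channel(\"{}:{}\".format(address, port))\n",
|
||||
"stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n",
|
||||
"\n",
|
||||
"# Build a request keyed by the input tensor name we saved earlier\n",
|
||||
"request = predict_pb2.PredictRequest()\n",
|
||||
"request.model_spec.name = aks_service.name\n",
|
||||
"with open(\"./snowleopardgaze.jpg\", \"rb\") as f:\n",
|
||||
"    request.inputs[input_tensors].CopyFrom(tf.make_tensor_proto([f.read()]))\n",
|
||||
"\n",
|
||||
"result = stub.Predict(request, 10.0)  # 10 second timeout\n",
|
||||
"```"
|
||||
]
|
||||
},
|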
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 7.b. Serve the model\n",
|
||||
"To understand the results we need a mapping to the human readable imagenet classes"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import requests\n",
|
||||
"classes_entries = requests.get(\"https://raw.githubusercontent.com/Lasagne/Recipes/master/examples/resnet50/imagenet_classes.txt\").text.splitlines()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Score image with input and output tensor names\n",
|
||||
"results = client.score_file(path=\"./snowleopardgaze.jpg\", \n",
|
||||
" input_name=input_tensors, \n",
|
||||
" outputs=output_tensors)\n",
|
||||
"\n",
|
||||
"# map results [class_id] => [confidence]\n",
|
||||
"results = enumerate(results)\n",
|
||||
"# sort results by confidence\n",
|
||||
"sorted_results = sorted(results, key=lambda x: x[1], reverse=True)\n",
|
||||
"# print top 5 results\n",
|
||||
"for top in sorted_results[:5]:\n",
|
||||
" print(classes_entries[top[0]], 'confidence:', top[1])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"clean-up\"></a>\n",
|
||||
"## 8. Clean-up\n",
|
||||
"Run the cell below to delete your webservice, image, and model (must be done in that order). In the [next notebook](./accelerated-models-training.ipynb) you will learn how to train a classfier on a new dataset using transfer learning and finetune the weights."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"aks_service.delete()\n",
|
||||
"aks_target.delete()\n",
|
||||
"image.delete()\n",
|
||||
"registered_model.delete()\n",
|
||||
"converted_model.delete()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "coverste"
|
||||
},
|
||||
{
|
||||
"name": "paledger"
|
||||
},
|
||||
{
|
||||
"name": "aibhalla"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,858 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Training with the Azure Machine Learning Accelerated Models Service"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook will introduce how to apply common machine learning techniques, like transfer learning, custom weights, and unquantized vs. quantized models, when working with our Azure Machine Learning Accelerated Models Service (Azure ML Accel Models).\n",
|
||||
"\n",
|
||||
"We will use Tensorflow for the preprocessing steps, ResNet50 for the featurizer, and the Keras API (built on Tensorflow backend) to build the classifier layers instead of the default ImageNet classifier used in Quickstart. Then we will train the model, evaluate it, and deploy it to run on an FPGA.\n",
|
||||
"\n",
|
||||
"#### Transfer Learning and Custom weights\n",
|
||||
"We will walk you through two ways to build and train a ResNet50 model on the Kaggle Cats and Dogs dataset: transfer learning only and then transfer learning with custom weights.\n",
|
||||
"\n",
|
||||
"In using transfer learning, our goal is to re-purpose the ResNet50 model already trained on the [ImageNet image dataset](http://www.image-net.org/) as a basis for our training of the Kaggle Cats and Dogs dataset. The ResNet50 featurizer will be imported as frozen, so only the Keras classifier will be trained.\n",
|
||||
"\n",
|
||||
"With the addition of custom weights, we will build the model so that the ResNet50 featurizer weights as not frozen. This will let us retrain starting with custom weights trained with ImageNet on ResNet50 and then use the Kaggle Cats and Dogs dataset to retrain and fine-tune the quantized version of the model.\n",
|
||||
"\n",
|
||||
"#### Unquantized vs. Quantized models\n",
|
||||
"The unquantized version of our models (ie. Resnet50, Resnet152, Densenet121, Vgg16, SsdVgg) uses native float precision (32-bit floats), which will be faster at training. We will use this for our first run through, then fine-tune the weights with the quantized version. The quantized version of our models (i.e. QuantizedResnet50, QuantizedResnet152, QuantizedDensenet121, QuantizedVgg16, QuantizedSsdVgg) will have the same node names as the unquantized version, but use quantized operations and will match the performance of the model when running on an FPGA.\n",
|
||||
"\n",
|
||||
"#### Contents\n",
|
||||
"1. [Setup Environment](#setup)\n",
|
||||
"* [Prepare Data](#prepare-data)\n",
|
||||
"* [Construct Model](#construct-model)\n",
|
||||
" * Preprocessor\n",
|
||||
" * Classifier\n",
|
||||
" * Model construction\n",
|
||||
"* [Train Model](#train-model)\n",
|
||||
"* [Test Model](#test-model)\n",
|
||||
"* [Execution](#execution)\n",
|
||||
" * [Transfer Learning](#transfer-learning)\n",
|
||||
" * [Transfer Learning with Custom Weights](#custom-weights)\n",
|
||||
"* [Create Image](#create-image)\n",
|
||||
"* [Deploy Model](#deploy-model)\n",
|
||||
"* [Test the service](#test-service)\n",
|
||||
"* [Clean-up](#cleanup)\n",
|
||||
"* [Appendix](#appendix)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"setup\"></a>\n",
|
||||
"## 1. Setup Environment\n",
|
||||
"#### 1.a. Please set up your environment as described in the [Quickstart](./accelerated-models-quickstart.ipynb), meaning:\n",
|
||||
"* Make sure your Workspace config.json exists and has the correct info\n",
|
||||
"* Install Tensorflow\n",
|
||||
"\n",
|
||||
"#### 1.b. Download dataset into ~/catsanddogs \n",
|
||||
"The dataset we will be using for training can be downloaded [here](https://www.microsoft.com/en-us/download/details.aspx?id=54765). Download the zip and extract to a directory named 'catsanddogs' under your user directory (\"~/catsanddogs\"). \n",
|
||||
"\n"
|
||||
]
|
||||
},
|
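||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you prefer to script the download, a minimal sketch (the direct zip URL below is a placeholder - copy the actual link from the download page above):\n",
|
||||
"```\n",
|
||||
"import os, zipfile, urllib.request\n",
|
||||
"\n",
|
||||
"datadir = os.path.expanduser(\"~/catsanddogs\")\n",
|
||||
"zip_url = \"https://download.microsoft.com/.../kagglecatsanddogs.zip\"  # placeholder URL\n",
|
||||
"zip_path = os.path.join(datadir, \"catsanddogs.zip\")\n",
|
||||
"\n",
|
||||
"# Download and extract into ~/catsanddogs\n",
|
||||
"os.makedirs(datadir, exist_ok=True)\n",
|
||||
"urllib.request.urlretrieve(zip_url, zip_path)\n",
|
||||
"with zipfile.ZipFile(zip_path) as z:\n",
|
||||
"    z.extractall(datadir)\n",
|
||||
"```"
|
||||
]
|
||||
},
|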
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 1.c. Import packages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import sys\n",
|
||||
"import tensorflow as tf\n",
|
||||
"import numpy as np\n",
|
||||
"from keras import backend as K\n",
|
||||
"import sklearn\n",
|
||||
"import tqdm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 1.d. Create directories for later use\n",
|
||||
"After you train your model in float32, you'll write the weights to a place on disk. We also need a location to store the models that get downloaded."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"custom_weights_dir = os.path.expanduser(\"~/custom-weights\")\n",
|
||||
"saved_model_dir = os.path.expanduser(\"~/models\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"prepare-data\"></a>\n",
|
||||
"## 2. Prepare Data\n",
|
||||
"Load the files we are going to use for training and testing. By default this notebook uses only a very small subset of the Cats and Dogs dataset. That makes it run relatively quickly."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import glob\n",
|
||||
"import imghdr\n",
|
||||
"datadir = os.path.expanduser(\"~/catsanddogs\")\n",
|
||||
"\n",
|
||||
"cat_files = glob.glob(os.path.join(datadir, 'PetImages', 'Cat', '*.jpg'))\n",
|
||||
"dog_files = glob.glob(os.path.join(datadir, 'PetImages', 'Dog', '*.jpg'))\n",
|
||||
"\n",
|
||||
"# Limit the data set to make the notebook execute quickly.\n",
|
||||
"cat_files = cat_files[:64]\n",
|
||||
"dog_files = dog_files[:64]\n",
|
||||
"\n",
|
||||
"# The data set has a few images that are not jpeg. Remove them.\n",
|
||||
"cat_files = [f for f in cat_files if imghdr.what(f) == 'jpeg']\n",
|
||||
"dog_files = [f for f in dog_files if imghdr.what(f) == 'jpeg']\n",
|
||||
"\n",
|
||||
"if(not len(cat_files) or not len(dog_files)):\n",
|
||||
" print(\"Please download the Kaggle Cats and Dogs dataset form https://www.microsoft.com/en-us/download/details.aspx?id=54765 and extract the zip to \" + datadir) \n",
|
||||
" raise ValueError(\"Data not found\")\n",
|
||||
"else:\n",
|
||||
" print(cat_files[0])\n",
|
||||
" print(dog_files[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Construct a numpy array as labels\n",
|
||||
"image_paths = cat_files + dog_files\n",
|
||||
"total_files = len(cat_files) + len(dog_files)\n",
|
||||
"labels = np.zeros(total_files)\n",
|
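||||
"# The first half of the array (cats) keeps label 0; dog entries get label 1\n",
|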
||||
"labels[len(cat_files):] = 1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Split images data as training data and test data\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"onehot_labels = np.array([[0,1] if i else [1,0] for i in labels])\n",
|
||||
"img_train, img_test, label_train, label_test = train_test_split(image_paths, onehot_labels, random_state=42, shuffle=True)\n",
|
||||
"\n",
|
||||
"print(len(img_train), len(img_test), label_train.shape, label_test.shape)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"construct-model\"></a>\n",
|
||||
"## 3. Construct Model\n",
|
||||
"We will define the functions to handle creating the preprocessor and the classifier first, and then run them together to actually construct the model with the Resnet50 featurizer in a single Tensorflow session in a separate cell.\n",
|
||||
"\n",
|
||||
"We use ResNet50 for the featurizer and build our own classifier using Keras layers. We train the featurizer and the classifier as one model. We will provide parameters to determine whether we are using the quantized version and whether we are using custom weights in training or not."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 3.a. Define image preprocessing step\n",
|
||||
"Same as in the Quickstart, before passing image dataset to the ResNet50 featurizer, we need to preprocess the input file to get it into the form expected by ResNet50. ResNet50 expects float tensors representing the images in BGR, channel last order. We've provided a default implementation of the preprocessing that you can use.\n",
|
||||
"\n",
|
||||
"**Note:** Expect to see TF deprecation warnings until we port our SDK over to use Tensorflow 2.0."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import azureml.accel.models.utils as utils\n",
|
||||
"\n",
|
||||
"def preprocess_images(scaling_factor=1.0):\n",
|
||||
" # Convert images to 3D tensors [width,height,channel] - channels are in BGR order.\n",
|
||||
" in_images = tf.placeholder(tf.string)\n",
|
||||
" image_tensors = utils.preprocess_array(in_images, 'RGB', scaling_factor)\n",
|
||||
" return in_images, image_tensors"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 3.b. Define classifier\n",
|
||||
"We use Keras layer APIs to construct the classifier. Because we're using the tensorflow backend, we can train this classifier in one session with our Resnet50 model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def construct_classifier(in_tensor, seed=None):\n",
|
||||
" from keras.layers import Dropout, Dense, Flatten\n",
|
||||
" from keras.initializers import glorot_uniform\n",
|
||||
" K.set_session(tf.get_default_session())\n",
|
||||
"\n",
|
||||
" FC_SIZE = 1024\n",
|
||||
" NUM_CLASSES = 2\n",
|
||||
"\n",
|
||||
" x = Dropout(0.2, input_shape=(1, 1, int(in_tensor.shape[3]),), seed=seed)(in_tensor)\n",
|
||||
" x = Dense(FC_SIZE, activation='relu', input_dim=(1, 1, int(in_tensor.shape[3]),),\n",
|
||||
" kernel_initializer=glorot_uniform(seed=seed), bias_initializer='zeros')(x)\n",
|
||||
" x = Flatten()(x)\n",
|
||||
" preds = Dense(NUM_CLASSES, activation='softmax', input_dim=FC_SIZE, name='classifier_output',\n",
|
||||
" kernel_initializer=glorot_uniform(seed=seed), bias_initializer='zeros')(x)\n",
|
||||
" return preds"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 3.c. Define model construction\n",
|
||||
"Now that the preprocessor and classifier for the model are defined, we can define how we want to construct the model. \n",
|
||||
"\n",
|
||||
"Constructing the model has these steps: \n",
|
||||
"1. Get preprocessing steps\n",
|
||||
"* Get featurizer using the Azure ML Accel Models SDK:\n",
|
||||
" * import the graph definition\n",
|
||||
" * restore the weights of the model into a Tensorflow session\n",
|
||||
"* Get classifier\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def construct_model(quantized, starting_weights_directory = None):\n",
|
||||
" from azureml.accel.models import Resnet50, QuantizedResnet50\n",
|
||||
" \n",
|
||||
" # Convert images to 3D tensors [width,height,channel]\n",
|
||||
" in_images, image_tensors = preprocess_images(1.0)\n",
|
||||
"\n",
|
||||
" # Construct featurizer using quantized or unquantized ResNet50 model\n",
|
||||
" if not quantized:\n",
|
||||
" featurizer = Resnet50(saved_model_dir)\n",
|
||||
" else:\n",
|
||||
" featurizer = QuantizedResnet50(saved_model_dir, custom_weights_directory = starting_weights_directory)\n",
|
||||
"\n",
|
||||
" features = featurizer.import_graph_def(input_tensor=image_tensors)\n",
|
||||
" \n",
|
||||
" # Construct classifier\n",
|
||||
" preds = construct_classifier(features)\n",
|
||||
" \n",
|
||||
" # Initialize weights\n",
|
||||
" sess = tf.get_default_session()\n",
|
||||
" tf.global_variables_initializer().run()\n",
|
||||
"\n",
|
||||
" featurizer.restore_weights(sess)\n",
|
||||
"\n",
|
||||
" return in_images, image_tensors, features, preds, featurizer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"train-model\"></a>\n",
|
||||
"## 4. Train Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def read_files(files):\n",
|
||||
" \"\"\" Read files to array\"\"\"\n",
|
||||
" contents = []\n",
|
||||
" for path in files:\n",
|
||||
" with open(path, 'rb') as f:\n",
|
||||
" contents.append(f.read())\n",
|
||||
" return contents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def train_model(preds, in_images, img_train, label_train, is_retrain = False, train_epoch = 10, learning_rate=None):\n",
|
||||
" \"\"\" training model \"\"\"\n",
|
||||
" from keras.objectives import binary_crossentropy\n",
|
||||
" from tqdm import tqdm\n",
|
||||
" \n",
|
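||||
"    # Default learning rate: 0.001 when fine-tuning the quantized model, 0.01 for initial training\n",
|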
||||
" learning_rate = learning_rate if learning_rate else 0.001 if is_retrain else 0.01\n",
|
||||
" \n",
|
||||
" # Specify the loss function\n",
|
||||
" in_labels = tf.placeholder(tf.float32, shape=(None, 2)) \n",
|
||||
" cross_entropy = tf.reduce_mean(binary_crossentropy(in_labels, preds))\n",
|
||||
" optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\n",
|
||||
"\n",
|
||||
" def chunks(a, b, n):\n",
|
||||
" \"\"\"Yield successive n-sized chunks from a and b.\"\"\"\n",
|
||||
" if (len(a) != len(b)):\n",
|
||||
" print(\"a and b are not equal in chunks(a,b,n)\")\n",
|
||||
" raise ValueError(\"Parameter error\")\n",
|
||||
"\n",
|
||||
" for i in range(0, len(a), n):\n",
|
||||
" yield a[i:i + n], b[i:i + n]\n",
|
||||
"\n",
|
||||
" chunk_size = 16\n",
|
||||
" chunk_num = len(label_train) / chunk_size\n",
|
||||
"\n",
|
||||
" sess = tf.get_default_session()\n",
|
||||
" for epoch in range(train_epoch):\n",
|
||||
" avg_loss = 0\n",
|
||||
" for img_chunk, label_chunk in tqdm(chunks(img_train, label_train, chunk_size)):\n",
|
||||
" contents = read_files(img_chunk)\n",
|
||||
" _, loss = sess.run([optimizer, cross_entropy],\n",
|
||||
" feed_dict={in_images: contents,\n",
|
||||
" in_labels: label_chunk,\n",
|
||||
" K.learning_phase(): 1})\n",
|
||||
" avg_loss += loss / chunk_num\n",
|
||||
" print(\"Epoch:\", (epoch + 1), \"loss = \", \"{:.3f}\".format(avg_loss))\n",
|
||||
" \n",
|
||||
" # Reach desired performance\n",
|
||||
" if (avg_loss < 0.001):\n",
|
||||
" break"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"test-model\"></a>\n",
|
||||
"## 5. Test Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def test_model(preds, in_images, img_test, label_test):\n",
|
||||
" \"\"\"Test the model\"\"\"\n",
|
||||
" from keras.metrics import categorical_accuracy\n",
|
||||
"\n",
|
||||
" in_labels = tf.placeholder(tf.float32, shape=(None, 2))\n",
|
||||
" accuracy = tf.reduce_mean(categorical_accuracy(in_labels, preds))\n",
|
||||
" contents = read_files(img_test)\n",
|
||||
"\n",
|
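||||
"    # Evaluate the accuracy tensor on the test set; learning_phase 0 puts Keras layers in inference mode\n",
|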
||||
" accuracy = accuracy.eval(feed_dict={in_images: contents,\n",
|
||||
" in_labels: label_test,\n",
|
||||
" K.learning_phase(): 0})\n",
|
||||
" return accuracy"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"execution\"></a>\n",
|
||||
"## 6. Execute steps\n",
|
||||
"You can run through the Transfer Learning section, then skip to Create AccelContainerImage. By default, because the custom weights section takes much longer for training twice, it is not saved as executable cells. You can copy the code or change cell type to 'Code'.\n",
|
||||
"\n",
|
||||
"<a id=\"transfer-learning\"></a>\n",
|
||||
"### 6.a. Training using Transfer Learning"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Launch the training\n",
|
||||
"tf.reset_default_graph()\n",
|
||||
"sess = tf.Session(graph=tf.get_default_graph())\n",
|
||||
"\n",
|
||||
"with sess.as_default():\n",
|
||||
" in_images, image_tensors, features, preds, featurizer = construct_model(quantized=True)\n",
|
||||
" train_model(preds, in_images, img_train, label_train, is_retrain=False, train_epoch=10, learning_rate=0.01) \n",
|
||||
" accuracy = test_model(preds, in_images, img_test, label_test) \n",
|
||||
" print(\"Accuracy:\", accuracy)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Save Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_name = 'resnet50-catsanddogs-tl'\n",
|
||||
"model_save_path = os.path.join(saved_model_dir, model_name)\n",
|
||||
"\n",
|
||||
"tf.saved_model.simple_save(sess, model_save_path,\n",
|
||||
" inputs={'images': in_images},\n",
|
||||
" outputs={'output_alias': preds})\n",
|
||||
"\n",
|
||||
"input_tensors = in_images.name\n",
|
||||
"output_tensors = preds.name\n",
|
||||
"\n",
|
||||
"print(input_tensors)\n",
|
||||
"print(output_tensors)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"custom-weights\"></a>\n",
|
||||
"### 6.b. Traning using Custom Weights\n",
|
||||
"\n",
|
||||
"Because the quantized graph defintion and the float32 graph defintion share the same node names in the graph definitions, we can initally train the weights in float32, and then reload them with the quantized operations (which take longer) to fine-tune the model.\n",
|
||||
"\n",
|
||||
"First we train the model with custom weights but without quantization. Training is done with native float precision (32-bit floats). We load the training data set and batch the training with 10 epochs. When the performance reaches desired level or starts decredation, we stop the training iteration and save the weights as tensorflow checkpoint files. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Launch the training\n",
|
||||
"```\n",
|
||||
"tf.reset_default_graph()\n",
|
||||
"sess = tf.Session(graph=tf.get_default_graph())\n",
|
||||
"\n",
|
||||
"with sess.as_default():\n",
|
||||
" in_images, image_tensors, features, preds, featurizer = construct_model(quantized=False)\n",
|
||||
" train_model(preds, in_images, img_train, label_train, is_retrain=False, train_epoch=10) \n",
|
||||
" accuracy = test_model(preds, in_images, img_test, label_test) \n",
|
||||
" print(\"Accuracy:\", accuracy)\n",
|
||||
" featurizer.save_weights(custom_weights_dir + \"/rn50\", tf.get_default_session())\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Test Model\n",
|
||||
"After training, we evaluate the trained model's accuracy on test dataset with quantization. So that we know the model's performance if it is deployed on the FPGA."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```\n",
|
||||
"tf.reset_default_graph()\n",
|
||||
"sess = tf.Session(graph=tf.get_default_graph())\n",
|
||||
"\n",
|
||||
"with sess.as_default():\n",
|
||||
" print(\"Testing trained model with quantization\")\n",
|
||||
" in_images, image_tensors, features, preds, quantized_featurizer = construct_model(quantized=True, starting_weights_directory=custom_weights_dir)\n",
|
||||
" accuracy = test_model(preds, in_images, img_test, label_test) \n",
|
||||
" print(\"Accuracy:\", accuracy)\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Fine-Tune Model\n",
|
||||
"Sometimes, the model's accuracy can drop significantly after quantization. In those cases, we need to retrain the model enabled with quantization to get better model accuracy."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```\n",
|
||||
"if (accuracy < 0.93):\n",
|
||||
" with sess.as_default():\n",
|
||||
" print(\"Fine-tuning model with quantization\")\n",
|
||||
" train_model(preds, in_images, img_train, label_train, is_retrain=True, train_epoch=10)\n",
|
||||
" accuracy = test_model(preds, in_images, img_test, label_test) \n",
|
||||
" print(\"Accuracy:\", accuracy)\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Save Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```\n",
|
||||
"model_name = 'resnet50-catsanddogs-cw'\n",
|
||||
"model_save_path = os.path.join(saved_model_dir, model_name)\n",
|
||||
"\n",
|
||||
"tf.saved_model.simple_save(sess, model_save_path,\n",
|
||||
" inputs={'images': in_images},\n",
|
||||
" outputs={'output_alias': preds})\n",
|
||||
"\n",
|
||||
"input_tensors = in_images.name\n",
|
||||
"output_tensors = preds.name\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"create-image\"></a>\n",
|
||||
"## 7. Create AccelContainerImage\n",
|
||||
"\n",
|
||||
"Below we will execute all the same steps as in the [Quickstart](./accelerated-models-quickstart.ipynb#create-image) to package the model we have saved locally into an accelerated Docker image saved in our workspace. To complete all the steps, it may take a few minutes. For more details on each step, check out the [Quickstart section on model registration](./accelerated-models-quickstart.ipynb#register-model)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Workspace\n",
|
||||
"from azureml.core.model import Model\n",
|
||||
"from azureml.core.image import Image\n",
|
||||
"from azureml.accel import AccelOnnxConverter\n",
|
||||
"from azureml.accel import AccelContainerImage\n",
|
||||
"\n",
|
||||
"# Retrieve workspace\n",
|
||||
"ws = Workspace.from_config()\n",
|
||||
"print(\"Successfully retrieved workspace:\", ws.name, ws.resource_group, ws.location, ws.subscription_id, '\\n')\n",
|
||||
"\n",
|
||||
"# Register model\n",
|
||||
"registered_model = Model.register(workspace = ws,\n",
|
||||
" model_path = model_save_path,\n",
|
||||
" model_name = model_name)\n",
|
||||
"print(\"Successfully registered: \", registered_model.name, registered_model.description, registered_model.version, '\\n', sep = '\\t')\n",
|
||||
"\n",
|
||||
"# Convert model\n",
|
||||
"convert_request = AccelOnnxConverter.convert_tf_model(ws, registered_model, input_tensors, output_tensors)\n",
|
||||
"# If it fails, you can run wait_for_completion again with show_output=True.\n",
|
||||
"convert_request.wait_for_completion(show_output=False)\n",
|
||||
"converted_model = convert_request.result\n",
|
||||
"print(\"\\nSuccessfully converted: \", converted_model.name, converted_model.url, converted_model.version, \n",
|
||||
" converted_model.id, converted_model.created_time, '\\n')\n",
|
||||
"\n",
|
||||
"# Package into AccelContainerImage\n",
|
||||
"image_config = AccelContainerImage.image_configuration()\n",
|
||||
"# Image name must be lowercase\n",
|
||||
"image_name = \"{}-image\".format(model_name)\n",
|
||||
"image = Image.create(name = image_name,\n",
|
||||
" models = [converted_model],\n",
|
||||
" image_config = image_config, \n",
|
||||
" workspace = ws)\n",
|
||||
"image.wait_for_creation()\n",
|
||||
"print(\"Created AccelContainerImage: {} {} {}\\n\".format(image.name, image.creation_state, image.image_location))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"deploy-image\"></a>\n",
|
||||
"## 8. Deploy image\n",
|
||||
"Once you have an Azure ML Accelerated Image in your Workspace, you can deploy it to two destinations, to a Databox Edge machine or to an AKS cluster. \n",
|
||||
"\n",
|
||||
"### 8.a. Deploy to Databox Edge Machine using IoT Hub\n",
|
||||
"See the sample [here](https://github.com/Azure-Samples/aml-real-time-ai/) for using the Azure IoT CLI extension for deploying your Docker image to your Databox Edge Machine.\n",
|
||||
"\n",
|
||||
"### 8.b. Deploy to AKS Cluster"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Create AKS ComputeTarget"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import AksCompute, ComputeTarget\n",
|
||||
"\n",
|
||||
"# Uses the specific FPGA enabled VM (sku: Standard_PB6s)\n",
|
||||
"# Authentication is enabled by default, but for testing we specify False\n",
|
||||
"prov_config = AksCompute.provisioning_configuration(vm_size = \"Standard_PB6s\",\n",
|
||||
" agent_count = 1)\n",
|
||||
"\n",
|
||||
"aks_name = 'my-aks-pb6-training'\n",
|
||||
"# Create the cluster\n",
|
||||
"aks_target = ComputeTarget.create(workspace = ws, \n",
|
||||
" name = aks_name, \n",
|
||||
" provisioning_configuration = prov_config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Provisioning an AKS cluster might take awhile (15 or so minutes), and we want to wait until it's successfully provisioned before we can deploy a service to it. If you interrupt this cell, provisioning of the cluster will continue. You can re-run it or check the status in your Workspace under Compute."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"aks_target.wait_for_completion(show_output = True)\n",
|
||||
"print(aks_target.provisioning_state)\n",
|
||||
"print(aks_target.provisioning_errors)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Deploy AccelContainerImage to AKS ComputeTarget"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.webservice import Webservice, AksWebservice\n",
|
||||
"\n",
|
||||
"# Set the web service configuration (for creating a test service, we don't want autoscale enabled)\n",
|
||||
"aks_config = AksWebservice.deploy_configuration(autoscale_enabled=False,\n",
|
||||
" num_replicas=1,\n",
|
||||
" auth_enabled = False)\n",
|
||||
"\n",
|
||||
"aks_service_name ='my-aks-service'\n",
|
||||
"\n",
|
||||
"aks_service = Webservice.deploy_from_image(workspace = ws,\n",
|
||||
" name = aks_service_name,\n",
|
||||
" image = image,\n",
|
||||
" deployment_config = aks_config,\n",
|
||||
" deployment_target = aks_target)\n",
|
||||
"aks_service.wait_for_deployment(show_output = True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"test-service\"></a>\n",
|
||||
"## 9. Test the service\n",
|
||||
"\n",
|
||||
"<a id=\"create-client\"></a>\n",
|
||||
"### 9.a. Create Client\n",
|
||||
"The image supports gRPC and the TensorFlow Serving \"predict\" API. We have a client that can call into the docker image to get predictions. \n",
|
||||
"\n",
|
||||
"**Note:** If you chose to use auth_enabled=True when creating your AksWebservice.deploy_configuration(), see documentation [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice(class)?view=azure-ml-py#get-keys--) on how to retrieve your keys and use either key as an argument to PredictionClient(...,access_token=key)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Using the grpc client in AzureML Accelerated Models SDK\n",
|
||||
"from azureml.accel.client import PredictionClient\n",
|
||||
"\n",
|
||||
"address = aks_service.scoring_uri\n",
|
||||
"ssl_enabled = address.startswith(\"https\")\n",
|
||||
"address = address[address.find('/')+2:].strip('/')\n",
|
||||
"port = 443 if ssl_enabled else 80\n",
|
||||
"\n",
|
||||
"# Initialize AzureML Accelerated Models client\n",
|
||||
"client = PredictionClient(address=address,\n",
|
||||
" port=port,\n",
|
||||
" use_ssl=ssl_enabled,\n",
|
||||
" service_name=aks_service.name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"serve-model\"></a>\n",
|
||||
"### 9.b. Serve the model\n",
|
||||
"Let's see how our service does on a few images. It may get a few wrong."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Specify an image to classify\n",
|
||||
"print('CATS')\n",
|
||||
"for image_file in cat_files[:8]:\n",
|
||||
" results = client.score_file(path=image_file, \n",
|
||||
" input_name=input_tensors, \n",
|
||||
" outputs=output_tensors)\n",
|
||||
" result = 'CORRECT ' if results[0] > results[1] else 'WRONG '\n",
|
||||
" print(result + str(results))\n",
|
||||
"print('DOGS')\n",
|
||||
"for image_file in dog_files[:8]:\n",
|
||||
" results = client.score_file(path=image_file, \n",
|
||||
" input_name=input_tensors, \n",
|
||||
" outputs=output_tensors)\n",
|
||||
" result = 'CORRECT ' if results[1] > results[0] else 'WRONG '\n",
|
||||
" print(result + str(results))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"cleanup\"></a>\n",
|
||||
"## 10. Cleanup\n",
|
||||
"It's important to clean up your resources, so that you won't incur unnecessary costs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"aks_service.delete()\n",
|
||||
"aks_target.delete()\n",
|
||||
"image.delete()\n",
|
||||
"registered_model.delete()\n",
|
||||
"converted_model.delete()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id=\"appendix\"></a>\n",
|
||||
"## 11. Appendix"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"License for plot_confusion_matrix:\n",
|
||||
"\n",
|
||||
"New BSD License\n",
|
||||
"\n",
|
||||
"Copyright (c) 2007-2018 The scikit-learn developers.\n",
|
||||
"All rights reserved.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Redistribution and use in source and binary forms, with or without\n",
|
||||
"modification, are permitted provided that the following conditions are met:\n",
|
||||
"\n",
|
||||
" a. Redistributions of source code must retain the above copyright notice,\n",
|
||||
" this list of conditions and the following disclaimer.\n",
|
||||
" b. Redistributions in binary form must reproduce the above copyright\n",
|
||||
" notice, this list of conditions and the following disclaimer in the\n",
|
||||
" documentation and/or other materials provided with the distribution.\n",
|
||||
" c. Neither the name of the Scikit-learn Developers nor the names of\n",
|
||||
" its contributors may be used to endorse or promote products\n",
|
||||
" derived from this software without specific prior written\n",
|
||||
" permission. \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n",
|
||||
"AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n",
|
||||
"IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n",
|
||||
"ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR\n",
|
||||
"ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n",
|
||||
"DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n",
|
||||
"SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n",
|
||||
"CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n",
|
||||
"LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n",
|
||||
"OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n",
|
||||
"DAMAGE.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "coverste"
|
||||
},
|
||||
{
|
||||
"name": "paledger"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
BIN
how-to-use-azureml/deployment/accelerated-models/meeting.jpg
Normal file
BIN
how-to-use-azureml/deployment/accelerated-models/meeting.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 74 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 79 KiB |
@@ -25,6 +25,13 @@
|
||||
"3. Build new image and deploy it. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -488,4 +495,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,6 +24,13 @@
|
||||
"4. Build new image and deploy it. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -468,4 +475,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
# ONNX on Azure Machine Learning
|
||||

|
||||
|
||||
These tutorials show how to create and deploy Open Neural Network eXchange ([ONNX](http://onnx.ai)) models in Azure Machine Learning environments using [ONNX Runtime](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-build-deploy-onnx) for inference. Once deployed as a web service, you can ping the model with your own set of images to be analyzed!
|
||||
|
||||
## Tutorials
|
||||
|
||||
0. [Configure your Azure Machine Learning Workspace](../../../configuration.ipynb)
|
||||
0. If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, [Configure your Azure Machine Learning Workspace](../../../configuration.ipynb)
|
||||
|
||||
#### Obtain pretrained models from the [ONNX Model Zoo](https://github.com/onnx/models) and deploy with ONNX Runtime
|
||||
1. [MNIST - Handwritten Digit Classification with ONNX Runtime](onnx-inference-mnist-deploy.ipynb)
|
||||
@@ -34,3 +35,5 @@ Licensed under the MIT License.
|
||||
|
||||
## Acknowledgements
|
||||
These tutorials were developed by Vinitra Swamy and Prasanth Pulavarthi of the Microsoft AI Frameworks team and adapted for presentation at Microsoft Ignite 2018.
|
||||
|
||||
|
||||
|
||||
@@ -9,6 +9,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -33,7 +40,7 @@
|
||||
"To make the best use of your time, make sure you have done the following:\n",
|
||||
"\n",
|
||||
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
|
||||
"* Go through the [configuration](../../../configuration.ipynb) notebook to:\n",
|
||||
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook to:\n",
|
||||
" * install the AML SDK\n",
|
||||
" * create a workspace and its configuration file (config.json)"
|
||||
]
|
||||
@@ -433,4 +440,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,6 +7,13 @@
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -34,7 +41,7 @@
|
||||
"## Prerequisites\n",
|
||||
"\n",
|
||||
"### 1. Install Azure ML SDK and create a new workspace\n",
|
||||
"Please follow [Azure ML configuration notebook](../../../configuration.ipynb) to set up your environment.\n",
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, please follow [Azure ML configuration notebook](../../../configuration.ipynb) to set up your environment.\n",
|
||||
"\n",
|
||||
"### 2. Install additional packages needed for this Notebook\n",
|
||||
"You need to install the popular plotting library `matplotlib`, the image manipulation library `opencv`, and the `onnx` library in the conda environment where Azure Maching Learning SDK is installed.\n",
|
||||
@@ -806,4 +813,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -34,7 +41,7 @@
|
||||
"## Prerequisites\n",
|
||||
"\n",
|
||||
"### 1. Install Azure ML SDK and create a new workspace\n",
|
||||
"Please follow [Azure ML configuration notebook](../../../configuration.ipynb) to set up your environment.\n",
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, please follow [Azure ML configuration notebook](../../../configuration.ipynb) to set up your environment.\n",
|
||||
"\n",
|
||||
"### 2. Install additional packages needed for this tutorial notebook\n",
|
||||
"You need to install the popular plotting library `matplotlib`, the image manipulation library `opencv`, and the `onnx` library in the conda environment where Azure Maching Learning SDK is installed. \n",
|
||||
@@ -810,4 +817,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -33,7 +40,7 @@
|
||||
"To make the best use of your time, make sure you have done the following:\n",
|
||||
"\n",
|
||||
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
|
||||
"* Go through the [configuration notebook](../../../configuration.ipynb) to:\n",
|
||||
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to:\n",
|
||||
" * install the AML SDK\n",
|
||||
" * create a workspace and its configuration file (config.json)"
|
||||
]
|
||||
@@ -417,4 +424,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -30,7 +37,7 @@
|
||||
"source": [
|
||||
"## Prerequisites\n",
|
||||
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
|
||||
"* Go through the [configuration notebook](../../../configuration.ipynb) to:\n",
|
||||
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to:\n",
|
||||
" * install the AML SDK\n",
|
||||
" * create a workspace and its configuration file (`config.json`)"
|
||||
]
|
||||
@@ -663,4 +670,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,407 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Deploying a web service to Azure Kubernetes Service (AKS)\n",
|
||||
"This notebook shows the steps for deploying a service: registering a model, creating an image, provisioning a cluster (one time action), and deploying a service to it. \n",
|
||||
"We then test and delete the service, image and model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Workspace\n",
|
||||
"from azureml.core.compute import AksCompute, ComputeTarget\n",
|
||||
"from azureml.core.webservice import Webservice, AksWebservice\n",
|
||||
"from azureml.core.image import Image\n",
|
||||
"from azureml.core.model import Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import azureml.core\n",
|
||||
"print(azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Get workspace\n",
|
||||
"Load existing workspace from the config file info."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.workspace import Workspace\n",
|
||||
"\n",
|
||||
"ws = Workspace.from_config()\n",
|
||||
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Register the model\n",
|
||||
"Register an existing trained model, add descirption and tags. Prior to registering the model, you should have a TensorFlow [Saved Model](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md) in the `resnet50` directory. You can download a [pretrained resnet50](https://github.com/tensorflow/models/tree/master/official/resnet#pre-trained-model) and unpack it to that directory."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#Register the model\n",
|
||||
"from azureml.core.model import Model\n",
|
||||
"model = Model.register(model_path = \"resnet50\", # this points to a local file\n",
|
||||
" model_name = \"resnet50\", # this is the name the model is registered as\n",
|
||||
" tags = {'area': \"Image classification\", 'type': \"classification\"},\n",
|
||||
" description = \"Image classification trained on Imagenet Dataset\",\n",
|
||||
" workspace = ws)\n",
|
||||
"\n",
|
||||
"print(model.name, model.description, model.version)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Create an image\n",
|
||||
"Create an image using the registered model the script that will load and run the model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%writefile score.py\n",
|
||||
"import tensorflow as tf\n",
|
||||
"import numpy as np\n",
|
||||
"import ujson\n",
|
||||
"from azureml.core.model import Model\n",
|
||||
"from azureml.contrib.services.aml_request import AMLRequest, rawhttp\n",
|
||||
"from azureml.contrib.services.aml_response import AMLResponse\n",
|
||||
"\n",
|
||||
"def init():\n",
|
||||
" global session\n",
|
||||
" global input_name\n",
|
||||
" global output_name\n",
|
||||
" \n",
|
||||
" session = tf.Session()\n",
|
||||
"\n",
|
||||
" model_path = Model.get_model_path('resnet50')\n",
|
||||
" model = tf.saved_model.loader.load(session, ['serve'], model_path)\n",
|
||||
" if len(model.signature_def['serving_default'].inputs) > 1:\n",
|
||||
" raise ValueError(\"This score.py only supports one input\")\n",
|
||||
" if len(model.signature_def['serving_default'].outputs) > 1:\n",
|
||||
" raise ValueError(\"This score.py only supports one input\")\n",
|
||||
" input_name = [tensor.name for tensor in model.signature_def['serving_default'].inputs.values()][0]\n",
|
||||
" output_name = [tensor.name for tensor in model.signature_def['serving_default'].outputs.values()][0]\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"@rawhttp\n",
|
||||
"def run(request):\n",
|
||||
" if request.method == 'POST':\n",
|
||||
" reqBody = request.get_data(False)\n",
|
||||
" resp = score(reqBody)\n",
|
||||
" return AMLResponse(resp, 200)\n",
|
||||
" if request.method == 'GET':\n",
|
||||
" respBody = str.encode(\"GET is not supported\")\n",
|
||||
" return AMLResponse(respBody, 405)\n",
|
||||
" return AMLResponse(\"bad request\", 500)\n",
|
||||
"\n",
|
||||
"def score(data):\n",
|
||||
" result = session.run(output_name, {input_name: [data]})\n",
|
||||
" return ujson.dumps(result[0])\n",
|
||||
"\n",
|
||||
"if __name__ == \"__main__\":\n",
|
||||
" init()\n",
|
||||
" with open(\"test_image.jpg\", 'rb') as f:\n",
|
||||
" content = f.read()\n",
|
||||
" print(score(content))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.conda_dependencies import CondaDependencies \n",
|
||||
"\n",
|
||||
"myenv = CondaDependencies.create(conda_packages=['tensorflow-gpu==1.12.0','numpy','ujson','azureml-contrib-services'])\n",
|
||||
"\n",
|
||||
"with open(\"myenv.yml\",\"w\") as f:\n",
|
||||
" f.write(myenv.serialize_to_string())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.image import ContainerImage\n",
|
||||
"\n",
|
||||
"image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n",
|
||||
" runtime = \"python\",\n",
|
||||
" conda_file = \"myenv.yml\",\n",
|
||||
" gpu_enabled = True\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"image = ContainerImage.create(name = \"GpuImage\",\n",
|
||||
" # this is the model object\n",
|
||||
" models = [model],\n",
|
||||
" image_config = image_config,\n",
|
||||
" workspace = ws)\n",
|
||||
"\n",
|
||||
"image.wait_for_creation(show_output = True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Provision the AKS Cluster\n",
|
||||
"This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Use the default configuration (can also provide parameters to customize)\n",
|
||||
"prov_config = AksCompute.provisioning_configuration(vm_size=\"Standard_NC6\")\n",
|
||||
"\n",
|
||||
"aks_name = 'my-aks-9' \n",
|
||||
"# Create the cluster\n",
|
||||
"aks_target = ComputeTarget.create(workspace = ws, \n",
|
||||
" name = aks_name, \n",
|
||||
" provisioning_configuration = prov_config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Create AKS Cluster in an existing virtual network (optional)\n",
|
||||
"See code snippet below. Check the documentation [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-enable-virtual-network#use-azure-kubernetes-service) for more details."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"'''\n",
|
||||
"from azureml.core.compute import ComputeTarget, AksCompute\n",
|
||||
"\n",
|
||||
"# Create the compute configuration and set virtual network information\n",
|
||||
"config = AksCompute.provisioning_configuration(vm_size=\"Standard_NC6\", location=\"eastus2\")\n",
|
||||
"config.vnet_resourcegroup_name = \"mygroup\"\n",
|
||||
"config.vnet_name = \"mynetwork\"\n",
|
||||
"config.subnet_name = \"default\"\n",
|
||||
"config.service_cidr = \"10.0.0.0/16\"\n",
|
||||
"config.dns_service_ip = \"10.0.0.10\"\n",
|
||||
"config.docker_bridge_cidr = \"172.17.0.1/16\"\n",
|
||||
"\n",
|
||||
"# Create the compute target\n",
|
||||
"aks_target = ComputeTarget.create(workspace = ws,\n",
|
||||
" name = \"myaks\",\n",
|
||||
" provisioning_configuration = config)\n",
|
||||
"'''"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Enable SSL on the AKS Cluster (optional)\n",
|
||||
"See code snippet below. Check the documentation [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-secure-web-service) for more details"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# provisioning_config = AksCompute.provisioning_configuration(ssl_cert_pem_file=\"cert.pem\", ssl_key_pem_file=\"key.pem\", ssl_cname=\"www.contoso.com\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"aks_target.wait_for_completion(show_output = True)\n",
|
||||
"print(aks_target.provisioning_state)\n",
|
||||
"print(aks_target.provisioning_errors)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Optional step: Attach existing AKS cluster\n",
|
||||
"\n",
|
||||
"If you have existing AKS cluster in your Azure subscription, you can attach it to the Workspace."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"'''\n",
|
||||
"# Use the default configuration (can also provide parameters to customize)\n",
|
||||
"resource_id = '/subscriptions/92c76a2f-0e1c-4216-b65e-abf7a3f34c1e/resourcegroups/raymondsdk0604/providers/Microsoft.ContainerService/managedClusters/my-aks-0605d37425356b7d01'\n",
|
||||
"\n",
|
||||
"create_name='my-existing-aks' \n",
|
||||
"# Create the cluster\n",
|
||||
"attach_config = AksCompute.attach_configuration(resource_id=resource_id)\n",
|
||||
"aks_target = ComputeTarget.attach(workspace=ws, name=create_name, attach_configuration=attach_config)\n",
|
||||
"# Wait for the operation to complete\n",
|
||||
"aks_target.wait_for_completion(True)\n",
|
||||
"'''"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Deploy web service to AKS"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#Set the web service configuration (using default here)\n",
|
||||
"aks_config = AksWebservice.deploy_configuration()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"aks_service_name ='aks-service-1'\n",
|
||||
"\n",
|
||||
"aks_service = Webservice.deploy_from_image(workspace = ws, \n",
|
||||
" name = aks_service_name,\n",
|
||||
" image = image,\n",
|
||||
" deployment_config = aks_config,\n",
|
||||
" deployment_target = aks_target)\n",
|
||||
"aks_service.wait_for_deployment(show_output = True)\n",
|
||||
"print(aks_service.state)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Test the web service\n",
|
||||
"We test the web sevice by passing the test images content."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"import requests\n",
|
||||
"key1, key2 = aks_service.get_keys()\n",
|
||||
"\n",
|
||||
"headers = {'Content-Type':'application/json', 'Authorization': 'Bearer ' + key1}\n",
|
||||
"test_sampe = open('test_image.jpg', 'rb').read()\n",
|
||||
"resp = requests.post(aks_service.scoring_uri, test_sample, headers=headers)"
|
||||
]
|
||||
},
|
||||
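Because `score.py` above serializes its result with `ujson.dumps`, the response body should be JSON text; a quick sanity check of the reply might look like this (a sketch, not part of the original notebook):

```python
# Inspect the scoring response returned by requests.post above.
print(resp.status_code)   # expect 200 on success
print(resp.text[:200])    # first part of the JSON prediction payload
```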
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Clean up\n",
|
||||
"Delete the service, image, model and compute target"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"aks_service.delete()\n",
|
||||
"image.delete()\n",
|
||||
"model.delete()\n",
|
||||
"aks_target.delete()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "aashishb"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -8,6 +8,13 @@
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -467,4 +474,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,13 @@
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -34,7 +41,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites\n",
|
||||
"Make sure you go through the [configuration](../../../configuration.ipynb) Notebook first if you haven't."
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration](../../../configuration.ipynb) Notebook first if you haven't."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -443,4 +450,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,10 +2,7 @@
|
||||
|
||||
Follow these sample notebooks to learn:
|
||||
|
||||
1. [Explain tabular data](explain-tabular-data): Basic example of explaining model trained on tabular data.
|
||||
2. [Explain local classification](explain-local-sklearn-classification): Explain a scikit-learn classification model.
|
||||
3. [Explain local regression](explain-local-sklearn-regression): Explain a scikit-learn regression model.
|
||||
1. [Explain tabular data locally](explain-tabular-data-local): Basic example of explaining model trained on tabular data.
|
||||
4. [Explain on remote AMLCompute](explain-on-amlcompute): Explain a model on a remote AMLCompute target.
|
||||
5. [Explain classification using Run History](explain-run-history-sklearn-classification): Explain a scikit-learn classification model with Run History.
|
||||
6. [Explain regression using Run History](explain-run-history-sklearn-regression): Explain a scikit-learn regression model with Run History.
|
||||
7. [Explain scikit-learn raw features](explain-sklearn-raw-features): Explain the raw features of a trained scikit-learn model.
|
||||
5. [Explain tabular data with Run History](explain-tabular-data-run-history): Explain a model with Run History.
|
||||
7. [Explain raw features](explain-tabular-data-raw-features): Explain the raw features of a trained model.
|
||||
|
||||
@@ -1,243 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Breast cancer diagnosis classification with scikit-learn (run model explainer locally)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Explain a model with the AML explain-model package\n",
|
||||
"\n",
|
||||
"1. Train a SVM classification model using Scikit-learn\n",
|
||||
"2. Run 'explain_model' with full data in local mode, which doesn't contact any Azure services\n",
|
||||
"3. Run 'explain_model' with summarized data in local mode, which doesn't contact any Azure services"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.datasets import load_breast_cancer\n",
|
||||
"from sklearn import svm\n",
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# 1. Run model explainer locally with full data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load the breast cancer diagnosis data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"breast_cancer_data = load_breast_cancer()\n",
|
||||
"classes = breast_cancer_data.target_names.tolist()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Split data into train and test\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"x_train, x_test, y_train, y_test = train_test_split(breast_cancer_data.data, breast_cancer_data.target, test_size=0.2, random_state=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train a SVM classification model, which you want to explain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"clf = svm.SVC(gamma=0.001, C=100., probability=True)\n",
|
||||
"model = clf.fit(x_train, y_train)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain predictions on your local machine"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tabular_explainer = TabularExplainer(model, x_train, features=breast_cancer_data.feature_names, classes=classes)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions (global explanation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Passing in test dataset for evaluation examples - note it must be a representative sample of the original data\n",
|
||||
"# x_train can be passed as well, but with more examples explanations will take longer although they may be more accurate\n",
|
||||
"global_explanation = tabular_explainer.explain_global(x_test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Sorted SHAP values\n",
|
||||
"print('ranked global importance values: {}'.format(global_explanation.get_ranked_global_values()))\n",
|
||||
"# Corresponding feature names\n",
|
||||
"print('ranked global importance names: {}'.format(global_explanation.get_ranked_global_names()))\n",
|
||||
"# feature ranks (based on original order of features)\n",
|
||||
"print('global importance rank: {}'.format(global_explanation.global_importance_rank))\n",
|
||||
"# per class feature names\n",
|
||||
"print('ranked per class feature names: {}'.format(global_explanation.get_ranked_per_class_names()))\n",
|
||||
"# per class feature importance values\n",
|
||||
"print('ranked per class feature values: {}'.format(global_explanation.get_ranked_per_class_values()))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"dict(zip(global_explanation.get_ranked_global_names(), global_explanation.get_ranked_global_values()))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions as a collection of local (instance-level) explanations"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# feature shap values for all features and all data points in the training data\n",
|
||||
"print('local importance values: {}'.format(global_explanation.local_importance_values))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain local data points (individual instances)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"local_explanation = tabular_explainer.explain_local(x_test[0,:])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# local feature importance information\n",
|
||||
"local_importance_values = local_explanation.local_importance_values\n",
|
||||
"print('local importance for first instance: {}'.format(local_importance_values[y_test[0]]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print('local importance feature names: {}'.format(list(local_explanation.features)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"dict(zip(local_explanation.features, local_explanation.local_importance_values[y_test[0]]))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "wamartin"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,231 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Boston Housing Price Prediction with scikit-learn (run model explainer locally)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Explain a model with the AML explain-model package\n",
|
||||
"\n",
|
||||
"1. Train a GradientBoosting regression model using Scikit-learn\n",
|
||||
"2. Run 'explain_model' with full dataset in local mode, which doesn't contact any Azure services.\n",
|
||||
"3. Run 'explain_model' with summarized dataset in local mode, which doesn't contact any Azure services."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn import datasets\n",
|
||||
"from sklearn.ensemble import GradientBoostingRegressor\n",
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# 1. Run model explainer locally with full data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load the Boston house price data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"boston_data = datasets.load_boston()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Split data into train and test\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"x_train, x_test, y_train, y_test = train_test_split(boston_data.data, boston_data.target, test_size=0.2, random_state=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train a GradientBoosting Regression model, which you want to explain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,\n",
|
||||
" learning_rate=0.1, loss='huber',\n",
|
||||
" random_state=1)\n",
|
||||
"model = clf.fit(x_train, y_train)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain predictions on your local machine"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tabular_explainer = TabularExplainer(model, x_train, features = boston_data.feature_names)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions (global explanation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Passing in test dataset for evaluation examples - note it must be a representative sample of the original data\n",
|
||||
"# x_train can be passed as well, but with more examples explanations will take longer although they may be more accurate\n",
|
||||
"global_explanation = tabular_explainer.explain_global(x_test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"help(global_explanation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Sorted SHAP values \n",
|
||||
"print('ranked global importance values: {}'.format(global_explanation.get_ranked_global_values()))\n",
|
||||
"# Corresponding feature names\n",
|
||||
"print('ranked global importance names: {}'.format(global_explanation.get_ranked_global_names()))\n",
|
||||
"# feature ranks (based on original order of features)\n",
|
||||
"print('global importance rank: {}'.format(global_explanation.global_importance_rank))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"dict(zip(global_explanation.get_ranked_global_names(), global_explanation.get_ranked_global_values()))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions as a collection of local (instance-level) explanations"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# feature shap values for all features and all data points in the training data\n",
|
||||
"print('local importance values: {}'.format(global_explanation.local_importance_values))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain local data points (individual instances)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"local_explanation = tabular_explainer.explain_local(x_test[0,:])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# local feature importance information\n",
|
||||
"local_importance_values = local_explanation.local_importance_values\n",
|
||||
"print('local importance values: {}'.format(local_importance_values))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "wamartin"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -1,255 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Breast cancer diagnosis classification with scikit-learn (save model explanations via AML Run History)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Explain a model with the AML explain-model package\n",
|
||||
"\n",
|
||||
"1. Train a SVM classification model using Scikit-learn\n",
|
||||
"2. Run 'explain_model' with AML Run History, which leverages run history service to store and manage the explanation data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.datasets import load_breast_cancer\n",
|
||||
"from sklearn import svm\n",
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# 1. Run model explainer locally with full data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load the breast cancer diagnosis data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"breast_cancer_data = load_breast_cancer()\n",
|
||||
"classes = breast_cancer_data.target_names.tolist()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Split data into train and test\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"x_train, x_test, y_train, y_test = train_test_split(breast_cancer_data.data, breast_cancer_data.target, test_size=0.2, random_state=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train a SVM classification model, which you want to explain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"clf = svm.SVC(gamma=0.001, C=100., probability=True)\n",
|
||||
"model = clf.fit(x_train, y_train)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain predictions on your local machine"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tabular_explainer = TabularExplainer(model, x_train, features=breast_cancer_data.feature_names, classes=classes)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions (global explanation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Passing in test dataset for evaluation examples - note it must be a representative sample of the original data\n",
|
||||
"# x_train can be passed as well, but with more examples explanations will take longer although they may be more accurate\n",
|
||||
"global_explanation = tabular_explainer.explain_global(x_test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# 2. Save Model Explanation With AML Run History"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace, Experiment, Run\n",
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer\n",
|
||||
"from azureml.contrib.explain.model.explanation.explanation_client import ExplanationClient\n",
|
||||
"# Check core SDK version number\n",
|
||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"print('Workspace name: ' + ws.name, \n",
|
||||
" 'Azure region: ' + ws.location, \n",
|
||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"experiment_name = 'explain_model'\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"run = experiment.start_logging()\n",
|
||||
"client = ExplanationClient.from_run(run)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Uploading model explanation data for storage or visualization in webUX\n",
|
||||
"# The explanation can then be downloaded on any compute\n",
|
||||
"client.upload_model_explanation(global_explanation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get model explanation data\n",
|
||||
"explanation = client.download_model_explanation()\n",
|
||||
"local_importance_values = explanation.local_importance_values\n",
|
||||
"expected_values = explanation.expected_values"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get the top k (e.g., 4) most important features with their importance values\n",
|
||||
"explanation = client.download_model_explanation(top_k=4)\n",
|
||||
"global_importance_values = explanation.get_ranked_global_values()\n",
|
||||
"global_importance_names = explanation.get_ranked_global_names()\n",
|
||||
"per_class_names = explanation.get_ranked_per_class_names()[0]\n",
|
||||
"per_class_values = explanation.get_ranked_per_class_values()[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print('per class feature importance values: {}'.format(per_class_values))\n",
|
||||
"print('per class feature importance names: {}'.format(per_class_names))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"dict(zip(per_class_names, per_class_values))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "wamartin"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,269 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Boston Housing Price Prediction with scikit-learn (save model explanations via AML Run History)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Explain a model with the AML explain-model package\n",
|
||||
"\n",
|
||||
"1. Train a GradientBoosting regression model using Scikit-learn\n",
|
||||
"2. Run 'explain_model' with AML Run History, which leverages run history service to store and manage the explanation data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Save Model Explanation With AML Run History"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#Import Iris dataset\n",
|
||||
"from sklearn import datasets\n",
|
||||
"from sklearn.ensemble import GradientBoostingRegressor\n",
|
||||
"\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace, Experiment, Run\n",
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer\n",
|
||||
"from azureml.contrib.explain.model.explanation.explanation_client import ExplanationClient\n",
|
||||
"# Check core SDK version number\n",
|
||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"print('Workspace name: ' + ws.name, \n",
|
||||
" 'Azure region: ' + ws.location, \n",
|
||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"experiment_name = 'explain_model'\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"run = experiment.start_logging()\n",
|
||||
"client = ExplanationClient.from_run(run)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load the Boston house price data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"boston_data = datasets.load_boston()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Split data into train and test\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"x_train, x_test, y_train, y_test = train_test_split(boston_data.data, boston_data.target, test_size=0.2, random_state=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train a GradientBoosting Regression model, which you want to explain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,\n",
|
||||
" learning_rate=0.1, loss='huber',\n",
|
||||
" random_state=1)\n",
|
||||
"model = clf.fit(x_train, y_train)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain predictions on your local machine"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tabular_explainer = TabularExplainer(model, x_train, features=boston_data.feature_names)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions (global explanation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Passing in test dataset for evaluation examples - note it must be a representative sample of the original data\n",
|
||||
"# x_train can be passed as well, but with more examples explanations will take longer although they may be more accurate\n",
|
||||
"global_explanation = tabular_explainer.explain_global(x_test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Uploading model explanation data for storage or visualization in webUX\n",
|
||||
"# The explanation can then be downloaded on any compute\n",
|
||||
"client.upload_model_explanation(global_explanation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get model explanation data\n",
|
||||
"explanation = client.download_model_explanation()\n",
|
||||
"local_importance_values = explanation.local_importance_values\n",
|
||||
"expected_values = explanation.expected_values"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Print the values\n",
|
||||
"print('expected values: {}'.format(expected_values))\n",
|
||||
"print('local importance values: {}'.format(local_importance_values))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get the top k (e.g., 4) most important features with their importance values\n",
|
||||
"explanation = client.download_model_explanation(top_k=4)\n",
|
||||
"global_importance_values = explanation.get_ranked_global_values()\n",
|
||||
"global_importance_names = explanation.get_ranked_global_names()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print('global importance values: {}'.format(global_importance_values))\n",
|
||||
"print('global importance names: {}'.format(global_importance_names))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain individual instance predictions (local explanation) ##### needs to get updated with the new build"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"local_explanation = tabular_explainer.explain_local(x_test[0,:])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# local feature importance information\n",
|
||||
"local_importance_values = local_explanation.local_importance_values\n",
|
||||
"print('local importance values: {}'.format(local_importance_values))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "wamartin"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,221 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Summary\n",
|
||||
"From raw data that is a mixture of categoricals and numeric, featurize the categoricals using one hot encoding. Use tabular explainer to get explain object and then get raw feature importances"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Load titanic dataset. Impute missing values by filling both backward and forward since some data is at the first/last row. This is just for illustration and not a recommended way to impute missing data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"titanic_url = ('https://raw.githubusercontent.com/amueller/'\n",
|
||||
" 'scipy-2017-sklearn/091d371/notebooks/datasets/titanic3.csv')\n",
|
||||
"data = pd.read_csv(titanic_url)\n",
|
||||
"# fill missing values\n",
|
||||
"data = data.fillna(method=\"ffill\")\n",
|
||||
"data = data.fillna(method=\"bfill\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data.columns"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Similar to example [here](https://scikit-learn.org/stable/auto_examples/compose/plot_column_transformer_mixed_types.html#sphx-glr-auto-examples-compose-plot-column-transformer-mixed-types-py), use a subset of columns"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"\n",
|
||||
"numeric_features = ['age', 'fare']\n",
|
||||
"categorical_features = ['embarked', 'sex', 'pclass']\n",
|
||||
"\n",
|
||||
"y = data['survived'].values\n",
|
||||
"X = data[categorical_features + numeric_features]\n",
|
||||
"\n",
|
||||
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"One hot encode the categorical features"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.preprocessing import OneHotEncoder\n",
|
||||
"one_enc = OneHotEncoder()\n",
|
||||
"one_enc.fit(X_train[categorical_features])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Columnwise concatenate one hot encoded categoricals and numerical features."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"from scipy import sparse\n",
|
||||
"def get_feats(X):\n",
|
||||
" a = one_enc.transform(X[categorical_features])\n",
|
||||
" b = X[numeric_features]\n",
|
||||
" return sparse.hstack((one_enc.transform(X[categorical_features]), X[numeric_features].values))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Train a logistic regression model on featurized training data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.linear_model import LogisticRegression\n",
|
||||
"\n",
|
||||
"X_train_transformed = get_feats(X_train)\n",
|
||||
"X_test_transformed = get_feats(X_test)\n",
|
||||
"\n",
|
||||
"clf = LogisticRegression(solver='lbfgs', max_iter=200)\n",
|
||||
"clf.fit(X_train_transformed, y_train)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Get feature mapping between raw and generated features. Using the order in which features are concatenated in `get_feats` and using `categories_` in `OneHotEncoder` we are able to compute this mapping."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"raw_feat_mapping = []\n",
|
||||
"start_index = 0\n",
|
||||
"for cat_list in one_enc.categories_:\n",
|
||||
" raw_feat_mapping.append([start_index + i for i in range(len(cat_list))])\n",
|
||||
" start_index += len(cat_list)\n",
|
||||
"for i in range(len(numeric_features)):\n",
|
||||
" raw_feat_mapping.append([start_index])\n",
|
||||
" start_index += 1 "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer\n",
|
||||
"\n",
|
||||
"explainer = TabularExplainer(clf, X_train_transformed)\n",
|
||||
"global_explanation = explainer.explain_global(X_test_transformed)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"raw_feat_imps = global_explanation.get_raw_feature_importances(raw_feat_mapping)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"feature_names = categorical_features + numeric_features\n",
|
||||
"sorted_indices = np.argsort(raw_feat_imps)[::-1]\n",
|
||||
"\n",
|
||||
"for i in sorted_indices:\n",
|
||||
" print(\"{}: {}\".format(feature_names[i], raw_feat_imps[i]))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "hichando"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,265 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Breast cancer diagnosis classification with scikit-learn (run model explainer locally)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Explain a model with the AML explain-model package\n",
|
||||
"\n",
|
||||
"1. Train a SVM classification model using Scikit-learn\n",
|
||||
"2. Run 'explain_model' with full data in local mode, which doesn't contact any Azure services\n",
|
||||
"3. Run 'explain_model' with summarized data in local mode, which doesn't contact any Azure services\n",
|
||||
"4. Visualize the global and local explanations with the visualization dashboard."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.datasets import load_breast_cancer\n",
|
||||
"from sklearn import svm\n",
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# 1. Run model explainer locally with full data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load the breast cancer diagnosis data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"breast_cancer_data = load_breast_cancer()\n",
|
||||
"classes = breast_cancer_data.target_names.tolist()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Split data into train and test\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"x_train, x_test, y_train, y_test = train_test_split(breast_cancer_data.data, breast_cancer_data.target, test_size=0.2, random_state=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train a SVM classification model, which you want to explain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"clf = svm.SVC(gamma=0.001, C=100., probability=True)\n",
|
||||
"model = clf.fit(x_train, y_train)"
|
||||
]
|
||||
},
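{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check (an added sketch, not part of the original sample), confirm the model is worth explaining by scoring it on the held-out test set; `clf`, `x_test`, and `y_test` are defined above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# hedged sketch: report held-out accuracy for the SVM trained above\n",
"print('test accuracy: {:.3f}'.format(clf.score(x_test, y_test)))"
]
},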
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain predictions on your local machine"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tabular_explainer = TabularExplainer(model, x_train, features=breast_cancer_data.feature_names, classes=classes)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions (global explanation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Passing in test dataset for evaluation examples - note it must be a representative sample of the original data\n",
|
||||
"# x_train can be passed as well, but with more examples explanations will take longer although they may be more accurate\n",
|
||||
"global_explanation = tabular_explainer.explain_global(x_test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Sorted SHAP values\n",
|
||||
"print('ranked global importance values: {}'.format(global_explanation.get_ranked_global_values()))\n",
|
||||
"# Corresponding feature names\n",
|
||||
"print('ranked global importance names: {}'.format(global_explanation.get_ranked_global_names()))\n",
|
||||
"# feature ranks (based on original order of features)\n",
|
||||
"print('global importance rank: {}'.format(global_explanation.global_importance_rank))\n",
|
||||
"# per class feature names\n",
|
||||
"print('ranked per class feature names: {}'.format(global_explanation.get_ranked_per_class_names()))\n",
|
||||
"# per class feature importance values\n",
|
||||
"print('ranked per class feature values: {}'.format(global_explanation.get_ranked_per_class_values()))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"dict(zip(global_explanation.get_ranked_global_names(), global_explanation.get_ranked_global_values()))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions as a collection of local (instance-level) explanations"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# feature shap values for all features and all data points in the training data\n",
|
||||
"print('local importance values: {}'.format(global_explanation.local_importance_values))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain local data points (individual instances)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# explain the first member of the test set\n",
|
||||
"instance_num = 0\n",
|
||||
"local_explanation = tabular_explainer.explain_local(x_test[instance_num,:])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# get the prediction for the first member of the test set and explain why model made that prediction\n",
|
||||
"prediction_value = clf.predict(x_test)[instance_num]\n",
|
||||
"\n",
|
||||
"sorted_local_importance_values = local_explanation.get_ranked_local_values()[prediction_value]\n",
|
||||
"sorted_local_importance_names = local_explanation.get_ranked_local_names()[prediction_value]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"dict(zip(sorted_local_importance_names, sorted_local_importance_values))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# 2. Load visualization dashboard"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.explain.model.visualize import ExplanationDashboard"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ExplanationDashboard(global_explanation, model, x_test)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "mesameki"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,266 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Iris flower classification with scikit-learn (run model explainer locally)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Explain a model with the AML explain-model package\n",
|
||||
"\n",
|
||||
"1. Train a SVM classification model using Scikit-learn\n",
|
||||
"2. Run 'explain_model' with full data in local mode, which doesn't contact any Azure services\n",
|
||||
"3. Run 'explain_model' with summarized data in local mode, which doesn't contact any Azure services\n",
|
||||
"4. Visualize the global and local explanations with the visualization dashboard."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.datasets import load_iris\n",
|
||||
"from sklearn import svm\n",
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# 1. Run model explainer locally with full data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load the breast cancer diagnosis data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"iris = load_iris()\n",
|
||||
"X = iris['data']\n",
|
||||
"y = iris['target']\n",
|
||||
"classes = iris['target_names']\n",
|
||||
"feature_names = iris['feature_names']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Split data into train and test\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train a SVM classification model, which you want to explain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"clf = svm.SVC(gamma=0.001, C=100., probability=True)\n",
|
||||
"model = clf.fit(x_train, y_train)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain predictions on your local machine"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tabular_explainer = TabularExplainer(model, x_train, features = feature_names, classes=classes)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions (global explanation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"global_explanation = tabular_explainer.explain_global(x_test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Sorted SHAP values\n",
|
||||
"print('ranked global importance values: {}'.format(global_explanation.get_ranked_global_values()))\n",
|
||||
"# Corresponding feature names\n",
|
||||
"print('ranked global importance names: {}'.format(global_explanation.get_ranked_global_names()))\n",
|
||||
"# feature ranks (based on original order of features)\n",
|
||||
"print('global importance rank: {}'.format(global_explanation.global_importance_rank))\n",
|
||||
"# per class feature names\n",
|
||||
"print('ranked per class feature names: {}'.format(global_explanation.get_ranked_per_class_names()))\n",
|
||||
"# per class feature importance values\n",
|
||||
"print('ranked per class feature values: {}'.format(global_explanation.get_ranked_per_class_values()))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"dict(zip(global_explanation.get_ranked_global_names(), global_explanation.get_ranked_global_values()))"
|
||||
]
|
||||
},
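{
"cell_type": "markdown",
"metadata": {},
"source": [
"An added sketch (assuming pandas is available in the environment): tabulate the ranked global importances as a small DataFrame for easier reading."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"# one row per feature, ranked from most to least important\n",
"pd.DataFrame({'feature': global_explanation.get_ranked_global_names(),\n",
"              'importance': global_explanation.get_ranked_global_values()})"
]
},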
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions as a collection of local (instance-level) explanations"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# feature shap values for all features and all data points in the training data\n",
|
||||
"print('local importance values: {}'.format(global_explanation.local_importance_values))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain local data points (individual instances)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# explain the first member of the test set\n",
|
||||
"instance_num = 0\n",
|
||||
"local_explanation = tabular_explainer.explain_local(x_test[instance_num,:])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# get the prediction for the first member of the test set and explain why model made that prediction\n",
|
||||
"prediction_value = clf.predict(x_test)[instance_num]\n",
|
||||
"\n",
|
||||
"sorted_local_importance_values = local_explanation.get_ranked_local_values()[prediction_value]\n",
|
||||
"sorted_local_importance_names = local_explanation.get_ranked_local_names()[prediction_value]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"dict(zip(sorted_local_importance_names, sorted_local_importance_values))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load visualization dashboard"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.explain.model.visualize import ExplanationDashboard"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ExplanationDashboard(global_explanation, model, x_test)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "mesameki"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,258 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Boston Housing Price Prediction with scikit-learn (run model explainer locally)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Explain a model with the AML explain-model package\n",
|
||||
"\n",
|
||||
"1. Train a GradientBoosting regression model using Scikit-learn\n",
|
||||
"2. Run 'explain_model' with full dataset in local mode, which doesn't contact any Azure services.\n",
|
||||
"3. Run 'explain_model' with summarized dataset in local mode, which doesn't contact any Azure services.\n",
|
||||
"4. Visualize the global and local explanations with the visualization dashboard."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn import datasets\n",
|
||||
"from sklearn.ensemble import GradientBoostingRegressor\n",
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# 1. Run model explainer locally with full data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load the Boston house price data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"boston_data = datasets.load_boston()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Split data into train and test\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"x_train, x_test, y_train, y_test = train_test_split(boston_data.data, boston_data.target, test_size=0.2, random_state=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train a GradientBoosting Regression model, which you want to explain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"reg = GradientBoostingRegressor(n_estimators=100, max_depth=4,\n",
|
||||
" learning_rate=0.1, loss='huber',\n",
|
||||
" random_state=1)\n",
|
||||
"model = reg.fit(x_train, y_train)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain predictions on your local machine"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tabular_explainer = TabularExplainer(model, x_train, features = boston_data.feature_names)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions (global explanation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Passing in test dataset for evaluation examples - note it must be a representative sample of the original data\n",
|
||||
"# x_train can be passed as well, but with more examples explanations will take longer although they may be more accurate\n",
|
||||
"global_explanation = tabular_explainer.explain_global(x_test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Sorted SHAP values \n",
|
||||
"print('ranked global importance values: {}'.format(global_explanation.get_ranked_global_values()))\n",
|
||||
"# Corresponding feature names\n",
|
||||
"print('ranked global importance names: {}'.format(global_explanation.get_ranked_global_names()))\n",
|
||||
"# feature ranks (based on original order of features)\n",
|
||||
"print('global importance rank: {}'.format(global_explanation.global_importance_rank))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"dict(zip(global_explanation.get_ranked_global_names(), global_explanation.get_ranked_global_values()))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions as a collection of local (instance-level) explanations"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# feature shap values for all features and all data points in the training data\n",
|
||||
"print('local importance values: {}'.format(global_explanation.local_importance_values))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain local data points (individual instances)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"local_explanation = tabular_explainer.explain_local(x_test[0,:])"
|
||||
]
|
||||
},
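{
"cell_type": "markdown",
"metadata": {},
"source": [
"For context (an added sketch), print the model's prediction for the instance whose local explanation was just computed; `model` and `x_test` are defined above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# predicted house price for the first test instance, which the local explanation breaks down\n",
"print('predicted value: {:.2f}'.format(model.predict(x_test[0:1])[0]))"
]
},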
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# sorted local feature importance information; reflects the original feature order\n",
|
||||
"sorted_local_importance_names = local_explanation.get_ranked_local_names()\n",
|
||||
"sorted_local_importance_values = local_explanation.get_ranked_local_values()\n",
|
||||
"\n",
|
||||
"print('sorted local importance names: {}'.format(sorted_local_importance_names))\n",
|
||||
"print('sorted local importance values: {}'.format(sorted_local_importance_values))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load visualization dashboard"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.explain.model.visualize import ExplanationDashboard"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ExplanationDashboard(global_explanation, model, x_test)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "mesameki"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,288 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Summary\n",
|
||||
"From raw data that is a mixture of categoricals and numeric, featurize the categoricals using one hot encoding. Use tabular explainer to get explain object and then get raw feature importances"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Explain a model with the AML explain-model package on raw features\n",
|
||||
"\n",
|
||||
"1. Train a Logistic Regression model using Scikit-learn\n",
|
||||
"2. Run 'explain_model' with full dataset in local mode, which doesn't contact any Azure services.\n",
|
||||
"3. Run 'explain_model' with summarized dataset in local mode, which doesn't contact any Azure services.\n",
|
||||
"4. Visualize the global and local explanations with the visualization dashboard."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This example needs sklearn-pandas. If it is not installed, uncomment and run the following line."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install sklearn-pandas"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.pipeline import Pipeline\n",
|
||||
"from sklearn.impute import SimpleImputer\n",
|
||||
"from sklearn.preprocessing import StandardScaler, OneHotEncoder\n",
|
||||
"from sklearn.linear_model import LogisticRegression\n",
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer\n",
|
||||
"from sklearn_pandas import DataFrameMapper\n",
|
||||
"import pandas as pd\n",
|
||||
"import numpy as np"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"titanic_url = ('https://raw.githubusercontent.com/amueller/'\n",
|
||||
" 'scipy-2017-sklearn/091d371/notebooks/datasets/titanic3.csv')\n",
|
||||
"data = pd.read_csv(titanic_url)\n",
|
||||
"# fill missing values\n",
|
||||
"data = data.fillna(method=\"ffill\")\n",
|
||||
"data = data.fillna(method=\"bfill\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# 1. Run model explainer locally with full data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Similar to example [here](https://scikit-learn.org/stable/auto_examples/compose/plot_column_transformer_mixed_types.html#sphx-glr-auto-examples-compose-plot-column-transformer-mixed-types-py), use a subset of columns"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"\n",
|
||||
"numeric_features = ['age', 'fare']\n",
|
||||
"categorical_features = ['embarked', 'sex', 'pclass']\n",
|
||||
"\n",
|
||||
"y = data['survived'].values\n",
|
||||
"X = data[categorical_features + numeric_features]\n",
|
||||
"\n",
|
||||
"x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.pipeline import Pipeline\n",
|
||||
"from sklearn.impute import SimpleImputer\n",
|
||||
"from sklearn.preprocessing import StandardScaler, OneHotEncoder\n",
|
||||
"from sklearn_pandas import DataFrameMapper\n",
|
||||
"\n",
|
||||
"# Impute, standardize the numeric features and one-hot encode the categorical features. \n",
|
||||
"\n",
|
||||
"transformations = [\n",
|
||||
" ([\"age\", \"fare\"], Pipeline(steps=[\n",
|
||||
" ('imputer', SimpleImputer(strategy='median')),\n",
|
||||
" ('scaler', StandardScaler())\n",
|
||||
" ])),\n",
|
||||
" ([\"embarked\"], Pipeline(steps=[\n",
|
||||
" (\"imputer\", SimpleImputer(strategy='constant', fill_value='missing')), \n",
|
||||
" (\"encoder\", OneHotEncoder(sparse=False))])),\n",
|
||||
" ([\"sex\", \"pclass\"], OneHotEncoder(sparse=False)) \n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Append classifier to preprocessing pipeline.\n",
|
||||
"# Now we have a full prediction pipeline.\n",
|
||||
"clf = Pipeline(steps=[('preprocessor', DataFrameMapper(transformations)),\n",
|
||||
" ('classifier', LogisticRegression(solver='lbfgs'))])"
|
||||
]
|
||||
},
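{
"cell_type": "markdown",
"metadata": {},
"source": [
"An added sketch: peek at the featurized output to see how one-hot encoding expands the raw columns. This fits a fresh `DataFrameMapper` on the training frame purely for inspection."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# the raw frame has 5 columns; the encoded matrix has more, since each categorical level becomes its own column\n",
"print('raw shape: {}'.format(x_train.shape))\n",
"print('featurized shape: {}'.format(DataFrameMapper(transformations).fit_transform(x_train).shape))"
]
},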
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train a Logistic Regression model, which you want to explain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = clf.fit(x_train, y_train)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain predictions on your local machine"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tabular_explainer = TabularExplainer(clf.steps[-1][1], initialization_examples=x_train, features=x_train.columns, transformations=transformations)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Passing in test dataset for evaluation examples - note it must be a representative sample of the original data\n",
|
||||
"# x_train can be passed as well, but with more examples explanations will take longer although they may be more accurate\n",
|
||||
"global_explanation = tabular_explainer.explain_global(x_test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sorted_global_importance_values = global_explanation.get_ranked_global_values()\n",
|
||||
"sorted_global_importance_names = global_explanation.get_ranked_global_names()\n",
|
||||
"dict(zip(sorted_global_importance_names, sorted_global_importance_values))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions as a collection of local (instance-level) explanations"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# explain the first member of the test set\n",
|
||||
"local_explanation = tabular_explainer.explain_local(x_test[:1])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# get the prediction for the first member of the test set and explain why model made that prediction\n",
|
||||
"prediction_value = clf.predict(x_test)[0]\n",
|
||||
"\n",
|
||||
"sorted_local_importance_values = local_explanation.get_ranked_local_values()[prediction_value]\n",
|
||||
"sorted_local_importance_names = local_explanation.get_ranked_local_names()[prediction_value]\n",
|
||||
"\n",
|
||||
"# Sorted local SHAP values\n",
|
||||
"print('ranked local importance values: {}'.format(sorted_local_importance_values))\n",
|
||||
"# Corresponding feature names\n",
|
||||
"print('ranked local importance names: {}'.format(sorted_local_importance_names))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# 2. Load visualization dashboard"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.explain.model.visualize import ExplanationDashboard"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ExplanationDashboard(global_explanation, model, x_test)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "mesameki"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,262 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Breast cancer diagnosis classification with scikit-learn (save model explanations via AML Run History)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Explain a model with the AML explain-model package\n",
|
||||
"\n",
|
||||
"1. Train a SVM classification model using Scikit-learn\n",
|
||||
"2. Run 'explain_model' with AML Run History, which leverages run history service to store and manage the explanation data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.datasets import load_breast_cancer\n",
|
||||
"from sklearn import svm\n",
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# 1. Run model explainer locally with full data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load the breast cancer diagnosis data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"breast_cancer_data = load_breast_cancer()\n",
|
||||
"classes = breast_cancer_data.target_names.tolist()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Split data into train and test\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"x_train, x_test, y_train, y_test = train_test_split(breast_cancer_data.data, breast_cancer_data.target, test_size=0.2, random_state=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train a SVM classification model, which you want to explain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"clf = svm.SVC(gamma=0.001, C=100., probability=True)\n",
|
||||
"model = clf.fit(x_train, y_train)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain predictions on your local machine"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tabular_explainer = TabularExplainer(model, x_train, features=breast_cancer_data.feature_names, classes=classes)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions (global explanation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Passing in test dataset for evaluation examples - note it must be a representative sample of the original data\n",
|
||||
"# x_train can be passed as well, but with more examples explanations will take longer although they may be more accurate\n",
|
||||
"global_explanation = tabular_explainer.explain_global(x_test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# 2. Save Model Explanation With AML Run History"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace, Experiment, Run\n",
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer\n",
|
||||
"from azureml.contrib.explain.model.explanation.explanation_client import ExplanationClient\n",
|
||||
"# Check core SDK version number\n",
|
||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"print('Workspace name: ' + ws.name, \n",
|
||||
" 'Azure region: ' + ws.location, \n",
|
||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"experiment_name = 'explain_model'\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"run = experiment.start_logging()\n",
|
||||
"client = ExplanationClient.from_run(run)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Uploading model explanation data for storage or visualization in webUX\n",
|
||||
"# The explanation can then be downloaded on any compute\n",
|
||||
"client.upload_model_explanation(global_explanation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get model explanation data\n",
|
||||
"explanation = client.download_model_explanation()\n",
|
||||
"local_importance_values = explanation.local_importance_values\n",
|
||||
"expected_values = explanation.expected_values"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get the top k (e.g., 4) most important features with their importance values\n",
|
||||
"explanation = client.download_model_explanation(top_k=4)\n",
|
||||
"global_importance_values = explanation.get_ranked_global_values()\n",
|
||||
"global_importance_names = explanation.get_ranked_global_names()\n",
|
||||
"per_class_names = explanation.get_ranked_per_class_names()[0]\n",
|
||||
"per_class_values = explanation.get_ranked_per_class_values()[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print('per class feature importance values: {}'.format(per_class_values))\n",
|
||||
"print('per class feature importance names: {}'.format(per_class_names))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"dict(zip(per_class_names, per_class_values))"
|
||||
]
|
||||
},
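{
"cell_type": "markdown",
"metadata": {},
"source": [
"Housekeeping (an added step): the run started earlier is still open; mark it completed so it shows as finished in Run History."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"run.complete()"
]
}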
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "mesameki"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,276 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Boston Housing Price Prediction with scikit-learn (save model explanations via AML Run History)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Explain a model with the AML explain-model package\n",
|
||||
"\n",
|
||||
"1. Train a GradientBoosting regression model using Scikit-learn\n",
|
||||
"2. Run 'explain_model' with AML Run History, which leverages run history service to store and manage the explanation data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Save Model Explanation With AML Run History"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#Import Iris dataset\n",
|
||||
"from sklearn import datasets\n",
|
||||
"from sklearn.ensemble import GradientBoostingRegressor\n",
|
||||
"\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace, Experiment, Run\n",
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer\n",
|
||||
"from azureml.contrib.explain.model.explanation.explanation_client import ExplanationClient\n",
|
||||
"# Check core SDK version number\n",
|
||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"print('Workspace name: ' + ws.name, \n",
|
||||
" 'Azure region: ' + ws.location, \n",
|
||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"experiment_name = 'explain_model'\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"run = experiment.start_logging()\n",
|
||||
"client = ExplanationClient.from_run(run)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load the Boston house price data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"boston_data = datasets.load_boston()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Split data into train and test\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"x_train, x_test, y_train, y_test = train_test_split(boston_data.data, boston_data.target, test_size=0.2, random_state=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train a GradientBoosting Regression model, which you want to explain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,\n",
|
||||
" learning_rate=0.1, loss='huber',\n",
|
||||
" random_state=1)\n",
|
||||
"model = clf.fit(x_train, y_train)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain predictions on your local machine"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tabular_explainer = TabularExplainer(model, x_train, features=boston_data.feature_names)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain overall model predictions (global explanation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Passing in test dataset for evaluation examples - note it must be a representative sample of the original data\n",
|
||||
"# x_train can be passed as well, but with more examples explanations will take longer although they may be more accurate\n",
|
||||
"global_explanation = tabular_explainer.explain_global(x_test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Uploading model explanation data for storage or visualization in webUX\n",
|
||||
"# The explanation can then be downloaded on any compute\n",
|
||||
"client.upload_model_explanation(global_explanation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get model explanation data\n",
|
||||
"explanation = client.download_model_explanation()\n",
|
||||
"local_importance_values = explanation.local_importance_values\n",
|
||||
"expected_values = explanation.expected_values"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Print the values\n",
|
||||
"print('expected values: {}'.format(expected_values))\n",
|
||||
"print('local importance values: {}'.format(local_importance_values))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get the top k (e.g., 4) most important features with their importance values\n",
|
||||
"explanation = client.download_model_explanation(top_k=4)\n",
|
||||
"global_importance_values = explanation.get_ranked_global_values()\n",
|
||||
"global_importance_names = explanation.get_ranked_global_names()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print('global importance values: {}'.format(global_importance_values))\n",
|
||||
"print('global importance names: {}'.format(global_importance_names))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Explain individual instance predictions (local explanation) ##### needs to get updated with the new build"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"local_explanation = tabular_explainer.explain_local(x_test[0,:])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# local feature importance information\n",
|
||||
"local_importance_values = local_explanation.local_importance_values\n",
|
||||
"print('local importance values: {}'.format(local_importance_values))"
|
||||
]
|
||||
},
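{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an added final step, complete the run opened at the start of this notebook so it no longer shows as running in Run History."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"run.complete()"
]
}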
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "mesameki"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,267 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Uncomment these if explanation packages are not already installed in your environment\n",
|
||||
"#!pip install --upgrade azureml-sdk[explain]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Explain a model with the AML explain-model package\n",
|
||||
"\n",
|
||||
"1. Train a SVM model using Scikit-learn\n",
|
||||
"2. Run 'explain_model' in local mode, which doesn't contact any Azure services\n",
|
||||
"3. Run 'explain_model' with AML Run History, which leverages Run History Service to store and manage the explanation data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Disclaimer: this notebook is a preview of model explainability, and the APIs shown below are subject to breaking changes"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train a SVM model, which we will try to explain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Import Iris dataset\n",
|
||||
"from sklearn import datasets\n",
|
||||
"iris = datasets.load_iris()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Split data into train and test\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Import scikit learn, fit a SVM model\n",
|
||||
"def create_scikit_learn_model(X, y):\n",
|
||||
" from sklearn import svm\n",
|
||||
" clf = svm.SVC(gamma=0.001, C=100., probability=True)\n",
|
||||
" model = clf.fit(X, y)\n",
|
||||
" return model\n",
|
||||
"model = create_scikit_learn_model(x_train, y_train)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Run model explainer locally"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import time\n",
|
||||
"start = time.time()\n",
|
||||
"\n",
|
||||
"explainer = TabularExplainer(model, x_train, features=iris.feature_names)\n",
|
||||
"global_explanation = explainer.explain_global(x_test)\n",
|
||||
"\n",
|
||||
"# importance values for each class, test example, and feature (local importance)\n",
|
||||
"local_imp_values = global_explanation.local_importance_values\n",
|
||||
"# base prediction with feature importances ignored\n",
|
||||
"expected_values = global_explanation.expected_values\n",
|
||||
"# global feature importance information\n",
|
||||
"global_imp_values = global_explanation.global_importance_values\n",
|
||||
"ranked_global_imp_names = global_explanation.get_ranked_global_names()\n",
|
||||
"# global per-class feature importance information\n",
|
||||
"per_class_imp_values = global_explanation.per_class_values\n",
|
||||
"ranked_per_class_imp_names = global_explanation.get_ranked_per_class_names()\n",
|
||||
"\n",
|
||||
"end = time.time()\n",
|
||||
"print(end - start)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Run model explainer with AML Run History"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace, Experiment, Run\n",
|
||||
"from azureml.explain.model.tabular_explainer import TabularExplainer\n",
|
||||
"from azureml.contrib.explain.model.explanation.explanation_client import ExplanationClient\n",
|
||||
"# Check core SDK version number\n",
|
||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"print('Workspace name: ' + ws.name, \n",
|
||||
" 'Azure region: ' + ws.location, \n",
|
||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"experiment_name = 'explain_model'\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"run = experiment.start_logging()\n",
|
||||
"client = ExplanationClient.from_run(run)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import time\n",
|
||||
"start = time.time()\n",
|
||||
"explainer = TabularExplainer(model, x_train, features=iris.feature_names, classes=iris.target_names)\n",
|
||||
"explanation = explainer.explain_global(x_test)\n",
|
||||
"client.upload_model_explanation(explanation)\n",
|
||||
"end = time.time()\n",
|
||||
"print(end - start)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"explanation_from_run = client.download_model_explanation()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# global feature importance information\n",
|
||||
"global_imp_values = explanation_from_run.global_importance_values\n",
|
||||
"global_imp_names = explanation_from_run.get_ranked_global_names()\n",
|
||||
"# global per-class feature importance information\n",
|
||||
"per_class_imp_values = explanation_from_run.per_class_values\n",
|
||||
"per_class_imp_names = explanation_from_run.get_ranked_per_class_names()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## This visualization is unsupported, and is not guaranteed to work in the future"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get the shap values and explore locally\n",
|
||||
"import shap\n",
|
||||
"import numpy as np\n",
|
||||
"shap.initjs()\n",
|
||||
"display(shap.force_plot(explanation_from_run.expected_values[1], np.asarray(explanation_from_run.local_importance_values[1]), x_test))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"run.complete()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "wamartin"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -54,3 +54,6 @@ In this directory, there are two types of notebooks:
|
||||
|
||||
1. [pipeline-batch-scoring.ipynb](https://aka.ms/pl-batch-score): This notebook demonstrates how to run a batch scoring job using Azure Machine Learning pipelines.
|
||||
2. [pipeline-style-transfer.ipynb](https://aka.ms/pl-style-trans): This notebook demonstrates how to run a style transfer job using Azure Machine Learning pipelines.
|
||||
|
||||
|
||||

|
||||
|
||||
@@ -0,0 +1,18 @@
|
||||
# Introduction to Azure Machine Learning Pipelines
|
||||
|
||||
The following notebooks introduce the core features of Azure Machine Learning Pipelines.
|
||||
The notebooks below are designed to be worked through in sequence; a minimal single-step pipeline sketch follows the list.
|
||||
|
||||
1. [aml-pipelines-getting-started.ipynb](https://aka.ms/pl-get-started): Start with this notebook to understand the concepts of using Azure Machine Learning Pipelines. This notebook will show you how to run steps in parallel and in sequence.
|
||||
2. [aml-pipelines-with-data-dependency-steps.ipynb](https://aka.ms/pl-data-dep): This notebook shows how to connect steps in your pipeline using data. Data produced by one step is used by subsequent steps to force an explicit dependency between steps.
|
||||
3. [aml-pipelines-publish-and-run-using-rest-endpoint.ipynb](https://aka.ms/pl-pub-rep): Once you are satisfied with your iterative runs, you can publish your pipeline to get a REST endpoint, which can be invoked from non-Python clients as well.
|
||||
4. [aml-pipelines-data-transfer.ipynb](https://aka.ms/pl-data-trans): This notebook shows how to transfer data between supported datastores.
|
||||
5. [aml-pipelines-use-databricks-as-compute-target.ipynb](https://aka.ms/pl-databricks): This notebook shows how you can use Pipelines to send your compute payload to Azure Databricks.
|
||||
6. [aml-pipelines-use-adla-as-compute-target.ipynb](https://aka.ms/pl-adla): This notebook shows how you can use Azure Data Lake Analytics (ADLA) as a compute target.
|
||||
7. [aml-pipelines-how-to-use-estimatorstep.ipynb](https://aka.ms/pl-estimator): This notebook shows how to use the EstimatorStep.
|
||||
8. [aml-pipelines-parameter-tuning-with-hyperdrive.ipynb](https://aka.ms/pl-hyperdrive): This notebook shows how to do hyperparameter tuning in Pipelines using HyperDriveStep.
|
||||
9. [aml-pipelines-how-to-use-azurebatch-to-run-a-windows-executable.ipynb](https://aka.ms/pl-azbatch): This notebook shows how to use AzureBatchStep to run your custom code in an Azure Batch cluster.
|
||||
10. [aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb](https://aka.ms/pl-schedule): Once you publish a Pipeline, you can schedule it to trigger based on an interval or on data change in a defined datastore.
|
||||
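Before the notebooks, here is a minimal sketch of what a pipeline looks like in code. It assumes a workspace `config.json` is present, plus a hypothetical AmlCompute cluster named `cpu-cluster` and a `scripts` folder containing `prepare.py` and `train.py`; adjust these names to your setup.

```python
# Minimal two-step pipeline sketch; cluster and script names are placeholders.
from azureml.core import Experiment, Workspace
from azureml.pipeline.core import Pipeline
from azureml.pipeline.steps import PythonScriptStep

ws = Workspace.from_config()

prepare = PythonScriptStep(name="prepare", script_name="prepare.py",
                           compute_target="cpu-cluster", source_directory="scripts")
train = PythonScriptStep(name="train", script_name="train.py",
                         compute_target="cpu-cluster", source_directory="scripts")

# Steps with no data dependency between them may run in parallel; the
# data-dependency notebook above shows how to enforce an explicit ordering.
pipeline = Pipeline(workspace=ws, steps=[prepare, train])
run = Experiment(ws, "pipeline-intro").submit(pipeline)
run.wait_for_completion(show_output=True)
```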
|
||||
|
||||

|
||||
@@ -8,6 +8,15 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -53,7 +62,7 @@
|
||||
"source": [
|
||||
"## Initialize Workspace\n",
|
||||
"\n",
|
||||
"Initialize a workspace object from persisted configuration. Make sure the config file is present at .\\config.json\n",
|
||||
"Initialize a workspace object from persisted configuration.If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure the config file is present at .\\config.json\n",
|
||||
"\n",
|
||||
"If you don't have a config.json file, please go through the configuration Notebook located here:\n",
|
||||
"https://github.com/Azure/MachineLearningNotebooks. \n",
|
||||
@@ -466,4 +475,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,15 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -37,7 +46,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites and Azure Machine Learning Basics\n",
|
||||
"Make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc. \n"
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc. \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -634,4 +643,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -67,7 +74,7 @@
|
||||
"source": [
|
||||
"Initialize a workspace object from persisted configuration. Make sure the config file is present at .\\config.json\n",
|
||||
"\n",
|
||||
"If you don't have a config.json file, please go through the configuration Notebook located [here](https://github.com/Azure/MachineLearningNotebooks). \n",
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, if you don't have a config.json file, please go through the configuration Notebook located [here](https://github.com/Azure/MachineLearningNotebooks). \n",
|
||||
"\n",
|
||||
"This sets you up with a working config file that has information on your workspace, subscription id, etc. "
|
||||
]
|
||||
@@ -373,4 +380,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -20,7 +27,7 @@
|
||||
"\n",
|
||||
"## Prerequisite:\n",
|
||||
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
|
||||
"* Go through the [configuration notebook](../../../configuration.ipynb) to:\n",
|
||||
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise,go through the [configuration notebook](../../../configuration.ipynb) to:\n",
|
||||
" * install the AML SDK\n",
|
||||
" * create a workspace and its configuration file (`config.json`)"
|
||||
]
|
||||
@@ -278,4 +285,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -50,7 +57,7 @@
|
||||
"source": [
|
||||
"## Initialize workspace\n",
|
||||
"\n",
|
||||
"Initialize a workspace object from persisted configuration. Make sure the config file is present at .\\config.json"
|
||||
"Initialize a workspace object from persisted configuration. If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure the config file is present at .\\config.json"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -434,4 +441,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -21,7 +28,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites and Azure Machine Learning Basics\n",
|
||||
"Make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc. \n",
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc. \n",
|
||||
"\n",
|
||||
"### Initialization Steps"
|
||||
]
|
||||
@@ -413,4 +420,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -21,7 +28,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites and AML Basics\n",
|
||||
"Make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc.\n",
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc.\n",
|
||||
"\n",
|
||||
"### Initialization Steps"
|
||||
]
|
||||
@@ -444,4 +451,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -52,7 +59,7 @@
|
||||
"source": [
|
||||
"## Initialize Workspace\n",
|
||||
"\n",
|
||||
"Initialize a workspace object from persisted configuration. Make sure the config file is present at .\\config.json"
|
||||
"Initialize a workspace object from persisted configuration. If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure the config file is present at .\\config.json"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -364,4 +371,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,6 +7,13 @@
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -105,7 +112,7 @@
|
||||
"source": [
|
||||
"## Initialize Workspace\n",
|
||||
"\n",
|
||||
"Initialize a workspace object from persisted configuration. Make sure the config file is present at .\\config.json"
|
||||
"Initialize a workspace object from persisted configuration. If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure the config file is present at .\\config.json"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -705,4 +712,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,6 +7,13 @@
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -23,7 +30,7 @@
|
||||
"## Introduction\n",
|
||||
"In this example we use the scikit-learn's [digit dataset](http://scikit-learn.org/stable/datasets/index.html#optical-recognition-of-handwritten-digits-dataset) to showcase how you can use AutoML for a simple classification problem.\n",
|
||||
"\n",
|
||||
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
|
||||
"\n",
|
||||
"In this notebook you would see\n",
|
||||
"1. Create an `Experiment` in an existing `Workspace`.\n",
|
||||
@@ -514,4 +521,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,6 +7,13 @@
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -21,7 +28,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites and Azure Machine Learning Basics\n",
|
||||
"Make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc. \n",
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc. \n",
|
||||
"\n",
|
||||
"### Azure Machine Learning and Pipeline SDK-specific Imports"
|
||||
]
|
||||
@@ -464,4 +471,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,6 +7,13 @@
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -28,7 +35,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites\n",
|
||||
"Make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc. "
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc. "
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -593,4 +600,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,13 @@
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -25,7 +32,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites\n",
|
||||
"Make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc. "
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwsie, make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc. "
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -643,4 +650,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,6 +18,13 @@
|
||||
" \n",
|
||||
"The interactive authentication is suitable for local experimentation on your own computer. Azure CLI authentication is suitable if you are already using Azure CLI for managing Azure resources, and want to sign in only once. The Service Principal authentication is suitable for automated workflows, for example as part of Azure Devops build."
|
||||
]
|
||||
},
|
||||
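{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a minimal sketch of the service principal flow, the cell below connects to a workspace non-interactively. All tenant, application, and secret values are placeholders that you must replace with your own."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: non-interactive sign-in with a service principal.\n",
"# Every \"<...>\" value below is a placeholder, not a real credential.\n",
"from azureml.core import Workspace\n",
"from azureml.core.authentication import ServicePrincipalAuthentication\n",
"\n",
"sp_auth = ServicePrincipalAuthentication(\n",
"    tenant_id=\"<tenant-id>\",\n",
"    service_principal_id=\"<application-id>\",\n",
"    service_principal_password=\"<application-secret>\")\n",
"\n",
"ws = Workspace(subscription_id=\"<subscription-id>\",\n",
"               resource_group=\"<resource-group>\",\n",
"               workspace_name=\"<workspace-name>\",\n",
"               auth=sp_auth)\n",
"print(ws.name)"
]
},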
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
@@ -250,4 +257,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -309,7 +309,7 @@
|
||||
" entry_script='tf_horovod_word2vec.py',\n",
|
||||
" node_count=2,\n",
|
||||
" distributed_training=MpiConfiguration(),\n",
|
||||
" framework_version='1.12')"
|
||||
" framework_version='1.13')"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -423,7 +423,7 @@
|
||||
" compute_target=compute_target,\n",
|
||||
" entry_script='tf_mnist.py', \n",
|
||||
" use_gpu=True, \n",
|
||||
" framework_version='1.12')"
|
||||
" framework_version='1.13')"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -7,4 +7,6 @@ Follow these sample notebooks to learn:
|
||||
3. [Train on remote VM](train-on-remote-vm): train a model using a remote Azure VM as compute target.
|
||||
4. [Train on AmlCompute](train-on-amlcompute): train a model using an AmlCompute cluster as compute target.
|
||||
5. [Train in an HDI Spark cluster](train-in-spark): train a Spark ML model using an HDInsight Spark cluster as compute target.
|
||||
6. [Logging API](logging-api): experiment with various logging functions to create runs and automatically generate graphs; a minimal logging sketch follows.
|
||||
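As a minimal sketch of that logging API (using only calls that appear in these notebooks), a training script can log scalar metrics that then surface as charts on the run:

```python
# Minimal logging sketch for use inside a submitted training script.
from azureml.core.run import Run

run = Run.get_context()  # handle to the run this script executes under
run.log("alpha", 0.03)   # scalar metrics appear in the run's charts
run.log("mse", 0.92)
run.complete()
```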
|
||||

|
||||
|
||||
@@ -8,6 +8,13 @@
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -62,7 +69,7 @@
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't. Also make sure you have tqdm and matplotlib installed in the current kernel.\n",
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) Notebook first if you haven't already to establish your connection to the AzureML Workspace. Also make sure you have tqdm and matplotlib installed in the current kernel.\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"(myenv) $ conda install -y tqdm matplotlib\n",
|
||||
@@ -527,4 +534,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,13 @@
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -45,7 +52,7 @@
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't. Also, if you're new to Azure ML, we recommend that you go through [the tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/service/tutorial-train-models-with-aml) first to learn the basic concepts.\n",
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't. Also, if you're new to Azure ML, we recommend that you go through [the tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/service/tutorial-train-models-with-aml) first to learn the basic concepts.\n",
|
||||
"\n",
|
||||
"Let's first import required packages, check Azure ML SDK version, connect to your workspace and create an Experiment to hold the runs."
|
||||
]
|
||||
@@ -592,4 +599,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,13 @@
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -25,7 +32,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites\n",
|
||||
"Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't."
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -275,4 +282,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,13 @@
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -31,7 +38,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites\n",
|
||||
"Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't."
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -519,4 +526,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,13 @@
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -29,7 +36,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites\n",
|
||||
"Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't."
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -319,9 +326,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Submit script to run in the system-managed environment\n",
|
||||
"A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 mninutes. But this conda environment is reused so long as you don't change the conda dependencies.\n",
|
||||
"\n",
|
||||
"\n"
|
||||
"A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. But this conda environment is reused so long as you don't change the conda dependencies."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -332,9 +337,9 @@
|
||||
"source": [
|
||||
"import subprocess\n",
|
||||
"\n",
|
||||
"# Check if Docker is installed and Linux containers are enables\n",
|
||||
"if subprocess.run(\"docker -v\", shell=True) == 0:\n",
|
||||
" out = subprocess.check_output(\"docker system info\", shell=True, encoding=\"ascii\").split(\"\\n\")\n",
|
||||
"# Check if Docker is installed and Linux containers are enabled\n",
|
||||
"if subprocess.run(\"docker -v\", shell=True).returncode == 0:\n",
|
||||
" out = subprocess.check_output(\"docker system info\", shell=True).decode('ascii')\n",
|
||||
" if not \"OSType: linux\" in out:\n",
|
||||
" print(\"Switch Docker engine to use Linux containers.\")\n",
|
||||
" else:\n",
|
||||
@@ -435,6 +440,29 @@
|
||||
"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's compare it to the others"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%matplotlib inline\n",
|
||||
"\n",
|
||||
"import matplotlib\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"\n",
|
||||
"plt.plot(metrics['alpha'], metrics['mse'], marker='o')\n",
|
||||
"plt.ylabel(\"MSE\")\n",
|
||||
"plt.xlabel(\"Alpha\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -455,7 +483,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We know the model `ridge_0.40.pkl` is the best performing model from the eariler queries. So let's register it with the workspace."
|
||||
"We know the model `ridge_0.40.pkl` is the best performing model from the earlier queries. So let's register it with the workspace."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -506,9 +534,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.6"
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,13 @@
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -30,7 +37,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites\n",
|
||||
"Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't."
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -110,7 +117,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Upload data files into datastore\n",
|
||||
"Every workspace comes with a default datastore (and you can register more) which is backed by the Azure blob storage account associated with the workspace. We can use it to transfer data from local to the cloud, and access it from the compute target."
|
||||
"Every workspace comes with a default [datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data) (and you can register more) which is backed by the Azure blob storage account associated with the workspace. We can use it to transfer data from local to the cloud, and access it from the compute target."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -236,7 +243,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Configure & Run\n",
|
||||
"First let's create a `DataReferenceConfiguration` object to inform the system what data folder to download to the copmute target."
|
||||
"First let's create a `DataReferenceConfiguration` object to inform the system what data folder to download to the compute target."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -638,4 +645,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,8 @@
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License
|
||||
|
||||
# Very simple script to demonstrate run in environment
|
||||
# Print message passed in as environment variable
|
||||
import os
|
||||
|
||||
print(os.environ.get("MESSAGE"))
|
||||
@@ -0,0 +1,364 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Using environments\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Contents\n",
|
||||
"\n",
|
||||
"1. [Introduction](#Introduction)\n",
|
||||
"1. [Setup](#Setup)\n",
|
||||
"1. [Create environment](#Create-environment)\n",
|
||||
" 1. Add Python packages\n",
|
||||
" 1. Specify environment variables\n",
|
||||
"1. [Submit run using environment](#Submit-run-using-environment)\n",
|
||||
"1. [Register environment](#Register-environment)\n",
|
||||
"1. [List and get existing environments](#List-and-get-existing-environments)\n",
|
||||
"1. [Other ways to create environments](#Other-ways-to-create-environments)\n",
|
||||
" 1. From existing Conda environment\n",
|
||||
" 1. From Conda or pip files\n",
|
||||
"1. [Docker settings](#Docker-settings)\n",
|
||||
"1. [Spark and Azure Databricks settings](#Spark-and-Azure-Databricks-settings)\n",
|
||||
"1. [Next steps](#Next-steps)\n",
|
||||
"\n",
|
||||
"## Introduction\n",
|
||||
"\n",
|
||||
"Azure ML environments are an encapsulation of the environment where your machine learning training happens. They define Python packages, environment variables, Docker settings and other attributes in declarative fashion. Environments are versioned: you can update them and retrieve old versions to revist and review your work.\n",
|
||||
"\n",
|
||||
"Environments allow you to:\n",
|
||||
"* Encapsulate dependencies of your training process, such as Python packages and their versions.\n",
|
||||
"* Reproduce the Python environment on your local computer in a remote run on VM or ML Compute cluster\n",
|
||||
"* Reproduce your experimentation environment in production setting.\n",
|
||||
"* Revisit and audit the environment in which an existing model was trained.\n",
|
||||
"\n",
|
||||
"Environment, compute target and training script together form run configuration: the full specification of training run.\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't.\n",
|
||||
"\n",
|
||||
"First, let's validate Azure ML SDK version and connect to workspace."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace\n",
|
||||
"\n",
|
||||
"print(azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"ws.get_details()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create environment\n",
|
||||
"\n",
|
||||
"You can create an environment by instantiating ```Environment``` object and then setting its attributes: set of Python packages, environment variables and others.\n",
|
||||
"\n",
|
||||
"### Add Python packages\n",
|
||||
"\n",
|
||||
"The recommended way is to specify Conda packages, as they typically come with complete set of pre-built binaries."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Environment\n",
|
||||
"from azureml.core.environment import CondaDependencies\n",
|
||||
"\n",
|
||||
"myenv = Environment(name=\"myenv\")\n",
|
||||
"conda_dep = CondaDependencies()\n",
|
||||
"conda_dep.add_conda_package(\"scikit-learn\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also add pip packages, and specify the version of package"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conda_dep.add_pip_package(\"pillow==5.4.1\")\n",
|
||||
"myenv.python.conda_dependencies=conda_dep"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Specify environment variables\n",
|
||||
"\n",
|
||||
"You can add environment variables to your environment. These then become available using ```os.environ.get``` in your training script."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"myenv.environment_variables = {\"MESSAGE\":\"Hello from Azure Machine Learning\"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Submit run using environment\n",
|
||||
"\n",
|
||||
"When you submit a run, you can specify which environment to use. \n",
|
||||
"\n",
|
||||
"On the first run in given environment, Azure ML spends some time building the environment. On the subsequent runs, Azure ML keeps track of changes and uses the existing environment, resulting in faster run completion."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import ScriptRunConfig, Experiment\n",
|
||||
"\n",
|
||||
"myexp = Experiment(workspace=ws, name = \"environment-example\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To submit a run, create a run configuration that combines the script file and environment, and pass it to ```Experiment.submit```. In this example, the script is submitted to local computer, but you can specify other compute targets such as remote clusters as well."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"runconfig = ScriptRunConfig(source_directory=\"example\", script=\"example.py\")\n",
|
||||
"runconfig.run_config.target = \"local\"\n",
|
||||
"runconfig.run_config.environment = myenv\n",
|
||||
"run = myexp.submit(config=runconfig)\n",
|
||||
"\n",
|
||||
"run.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Register environment\n",
|
||||
"\n",
|
||||
"You can manage environments by registering them. This allows you to track their versions, and reuse them in future runs. For example, once you've constructed an environment that meets your requirements, you can register it and use it in other experiments so as to standardize your workflow.\n",
|
||||
"\n",
|
||||
"If you register the environment with same name, the version number is increased by one. Note that Azure ML keeps track of differences between the version, so if you re-register an identical version, the version number is not increased."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"myenv.register(workspace=ws)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## List and get existing environments\n",
|
||||
"\n",
|
||||
"Your workspace contains a dictionary of registered environments. You can then use ```Environment.get``` to retrieve a specific environment with specific version."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for name,env in ws.environments.items():\n",
|
||||
" print(\"Name {} \\t version {}\".format(name,env.version))\n",
|
||||
"\n",
|
||||
"restored_environment = Environment.get(workspace=ws,name=\"myenv\",version=\"1\")\n",
|
||||
"\n",
|
||||
"print(\"Attributes of restored environment\")\n",
|
||||
"restored_environment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Other ways to create environments\n",
|
||||
"\n",
|
||||
"### From existing Conda environment\n",
|
||||
"\n",
|
||||
"You can create an environment from existing conda environment. This make it easy to reuse your local interactive environment in Azure ML remote runs. For example, if you've created conda environment using\n",
|
||||
"```\n",
|
||||
"conda create -n mycondaenv\n",
|
||||
"```\n",
|
||||
"you can create Azure ML environment out of that conda environment using\n",
|
||||
"```\n",
|
||||
"myenv = Environment.from_existing_conda_environment(name=\"myenv\",conda_environment_name=\"mycondaenv\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### From conda or pip files\n",
|
||||
"\n",
|
||||
"You can create environments from conda specification or pip requirements files using\n",
|
||||
"```\n",
|
||||
"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"path-to-conda-specification-file\")\n",
|
||||
"\n",
|
||||
"myenv = Environment.from_pip_requirements(name=\"myenv\", file_path=\"path-to-pip-requirements-file\")\n",
|
||||
"```\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Docker settings\n",
|
||||
"\n",
|
||||
"Docker container provides an efficient way to encapsulate the dependencies. When you enable Docker, Azure ML builds a Docker image and creates a Python environment within that container, given your specifications. The Docker images are reused: the first run in a new environment typically takes longer as the image is build.\n",
|
||||
"\n",
|
||||
"**Note:** For runs on local computer or attached virtual machine, that computer must have Docker installed and enabled. Machine Learning Compute has Docker pre-installed.\n",
|
||||
"\n",
|
||||
"Attribute ```docker.enabled``` controls whether to use Docker container or host OS for execution. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"myenv.docker.enabled = True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can specify custom Docker base image and registry. This allows you to customize and control in detail the guest OS in which your training run executes. whether to use GPU, whether to use shared volumes, and shm size."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"myenv.docker.base_image\n",
|
||||
"myenv.docker.base_image_registry"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also specify whether to use GPU or shared volumes, and shm size."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"myenv.docker.gpu_support\n",
|
||||
"myenv.docker.shared_volumes\n",
|
||||
"myenv.docker.shm_size"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Spark and Azure Databricks settings\n",
|
||||
"\n",
|
||||
"In addition to Python and Docker settings, Environment also contains attributes for Spark and Azure Databricks runs. These attributes become relevant when you submit runs on those compute targets."
|
||||
]
|
||||
},
|
||||
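{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a small sketch, you can inspect these sections directly. The attribute names below follow the SDK's SparkSection and DatabricksSection and are assumptions to verify against your SDK version."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Inspect the Spark and Databricks sections of the environment.\n",
"# Attribute names assumed from SparkSection/DatabricksSection.\n",
"print(myenv.spark)\n",
"print(myenv.databricks)"
]
},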
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"Learn more about remote runs on different compute targets:\n",
|
||||
"\n",
|
||||
"* [Train on ML Compute](../../train-on-amlcompute)\n",
|
||||
"\n",
|
||||
"* [Train on remote VM](../../train-on-remote-vm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "roastala"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
201
how-to-use-azureml/work-with-data/dataprep/README.md
Normal file
@@ -0,0 +1,201 @@
|
||||
# Azure Machine Learning Data Prep SDK
|
||||
|
||||
The Azure Machine Learning Data Prep SDK helps data scientists explore, cleanse and transform data for machine learning workflows in any Python environment.
|
||||
|
||||
Key benefits of the SDK (a short usage sketch follows this list):
|
||||
- Cross-platform functionality. Write with a single SDK and run it on Windows, macOS, or Linux.
|
||||
- Intelligent transformations powered by AI, including grouping similar values to their canonical form and deriving columns by examples without custom code.
|
||||
- Capability to work with multiple large files of different schemas.
|
||||
- Scalability on a single machine by streaming data during processing rather than loading into memory.
|
||||
- Seamless integration with other Azure Machine Learning services. You can simply pass your prepared data file into an `AutoMLConfig` object for automated machine learning training.
|
||||
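As a minimal sketch of that streaming behavior (the blob path below is the small taxi sample used by this repo's notebooks):

```python
# Lazily read a CSV sample; rows stream from the source during processing,
# so the full dataset is never loaded into memory.
import azureml.dataprep as dprep

green_path = "https://dprepdata.blob.core.windows.net/demo/green-small/*"
dflow = dprep.read_csv(path=green_path)
print(dflow.head(5))  # head() materializes only a small pandas sample
```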
|
||||
You will find in this repo:
|
||||
- [Getting Started Tutorial](tutorials/getting-started/getting-started.ipynb) for a quick introduction to the main features of Data Prep SDK.
|
||||
- [Case Study Notebooks](case-studies/new-york-taxi) that present an end-to-end data preparation tutorial where users start with a small dataset, profile the data with a statistics summary, cleanse it, and perform feature engineering. All transformation steps are saved in a dataflow object. Users can easily reapply the same steps on the full dataset and run it on Spark.
|
||||
- [How-To Guide Notebooks](how-to-guides) for more in-depth sample code at feature level.
|
||||
|
||||
## Installation
|
||||
Here are the [SDK installation steps](https://docs.microsoft.com/python/api/overview/azure/dataprep/intro?view=azure-dataprep-py#install).
|
||||
|
||||
## Documentation
|
||||
Here is more information on how to use the new Data Prep SDK:
|
||||
- [SDK overview and API reference docs](http://aka.ms/data-prep-sdk) that show different classes, methods, and function parameters for the SDK.
|
||||
- [Tutorial: Prep NYC taxi data](https://docs.microsoft.com/azure/machine-learning/service/tutorial-data-prep) for regression modeling and then run automated machine learning to build the model.
|
||||
- [How to load data](https://docs.microsoft.com/azure/machine-learning/service/how-to-load-data) is an overview guide on how to load data using the Data Prep SDK.
|
||||
- [How to transform data](https://docs.microsoft.com/azure/machine-learning/service/how-to-transform-data) is an overview guide on how to transform data.
|
||||
- [How to write data](https://docs.microsoft.com/azure/machine-learning/service/how-to-write-data) is an overview guide on how to write data to different storage locations.
|
||||
|
||||
## Support
|
||||
|
||||
If you have any questions or feedback, send us an email at: [askamldataprep@microsoft.com](mailto:askamldataprep@microsoft.com).
|
||||
|
||||
## Release Notes
|
||||
|
||||
### 2019-04-08 (version 1.1.1)
|
||||
|
||||
New features
|
||||
- You can read multiple Datastore/DataPath/DataReference sources using read_* transforms.
|
||||
- You can perform the following operations on columns to create a new column: division, floor, modulo, power, length.
|
||||
- Data Prep is now part of the Azure ML diagnostics suite and will log diagnostic information by default.
|
||||
- To turn this off, set this environment variable to true: DISABLE_DPREP_LOGGER
|
||||
|
||||
Bug fixes and improvements
|
||||
- Improved code documentation for commonly used classes and functions.
|
||||
- Fixed a bug in auto_read_file that failed to read Excel files.
|
||||
- Added option to overwrite the folder in read_pandas_dataframe.
|
||||
- Improved performance of dotnetcore2 dependency installation, and added support for Fedora 27/28 and Ubuntu 1804.
|
||||
- Improved the performance of reading from Azure Blobs.
|
||||
- Column type detection now supports columns of type Long.
|
||||
- Fixed a bug where some date values were being displayed as timestamps instead of Python datetime objects.
|
||||
- Fixed a bug where some type counts were being displayed as doubles instead of integers.
|
||||
|
||||
### 2019-03-25 (version 1.1.0)
|
||||
|
||||
Breaking changes
|
||||
- The concept of the Data Prep Package has been deprecated and is no longer supported. Instead of persisting multiple Dataflows in one Package, you can persist Dataflows individually.
|
||||
- How-to guide: [Opening and Saving Dataflows notebook](https://aka.ms/aml-data-prep-open-save-dataflows-nb)
|
||||
|
||||
New features
|
||||
- Data Prep can now recognize columns that match a particular Semantic Type, and split accordingly. The STypes currently supported include: email address, geographic coordinates (latitude & longitude), IPv4 and IPv6 addresses, US phone number, and US zip code.
|
||||
- How-to guide: [Semantic Types notebook](https://aka.ms/aml-data-prep-semantic-types-nb)
|
||||
- Data Prep now supports the following operations to generate a resultant column from two numeric columns: subtract, multiply, divide, and modulo.
|
||||
- You can call `verify_has_data()` on a Dataflow to check whether the Dataflow would produce records if executed.
|
||||
|
||||
Bug fixes and improvements
|
||||
- You can now specify the number of bins to use in a histogram for numeric column profiles.
|
||||
- The `read_pandas_dataframe` transform now requires the DataFrame to have string- or byte-typed column names.
|
||||
- Fixed a bug in the `fill_nulls` transform, where values were not correctly filled in if the column was missing.
|
||||
|
||||
### 2019-03-11 (version 1.0.17)
|
||||
|
||||
New features
|
||||
- Now supports adding two numeric columns to generate a resultant column using the expression language.
|
||||
|
||||
Bug fixes and improvements
|
||||
- Improved the documentation and parameter checking for random_split.
|
||||
|
||||
### 2019-02-27 (version 1.0.16)
|
||||
|
||||
Bug fix
|
||||
- Fixed a Service Principal authentication issue that was caused by an API change.
|
||||
|
||||
### 2019-02-25 (version 1.0.15)
|
||||
|
||||
New features
|
||||
- Data Prep now supports writing file streams from a dataflow. Also provides the ability to manipulate the file stream names to create new file names.
|
||||
- How-to guide: [Working With File Streams notebook](https://aka.ms/aml-data-prep-file-stream-nb)
|
||||
|
||||
Bug fixes and improvements
|
||||
- Improved performance of t-Digest on large data sets.
|
||||
- Data Prep now supports reading data from a DataPath.
|
||||
- One hot encoding now works on boolean and numeric columns.
|
||||
- Other miscellaneous bug fixes.
|
||||
|
||||
### 2019-02-11 (version 1.0.12)
|
||||
|
||||
New features
|
||||
- Data Prep now supports reading from an Azure SQL database using Datastore.
|
||||
|
||||
Changes
|
||||
- Significantly improved the memory performance of certain operations on large data.
|
||||
- `read_pandas_dataframe()` now requires `temp_folder` to be specified.
|
||||
- The `name` property on `ColumnProfile` has been deprecated - use `column_name` instead.
|
||||
|
||||
### 2019-01-28 (version 1.0.8)
|
||||
|
||||
Bug fixes
|
||||
- Significantly improved the performance of getting data profiles.
|
||||
- Fixed minor bugs related to error reporting.
|
||||
|
||||
### 2019-01-14 (version 1.0.7)
|
||||
|
||||
New features
|
||||
- Datastore improvements (documented in [Datastore how-to-guide](https://aka.ms/aml-data-prep-datastore-nb))
|
||||
- Added ability to read from and write to Azure File Share and ADLS Datastores in scale-up.
|
||||
- When using Datastores, Data Prep now supports using service principal authentication instead of interactive authentication.
|
||||
- Added support for wasb and wasbs urls.
|
||||
|
||||
### 2019-01-09 (version 1.0.6)
|
||||
|
||||
Bug fixes
|
||||
- Fixed bug with reading from public readable Azure Blob containers on Spark.
|
||||
|
||||
### 2018-12-19 (version 1.0.4)
|
||||
|
||||
New features
|
||||
- `to_bool` function now allows mismatched values to be converted to Error values. This is the new default mismatch behavior for `to_bool` and `set_column_types`, whereas the previous default behavior was to convert mismatched values to False.
|
||||
- When calling `to_pandas_dataframe`, there is a new option to interpret null/missing values in numeric columns as NaN.
|
||||
- Added ability to check the return type of some expressions to ensure type consistency and fail early.
|
||||
- You can now call `parse_json` to parse values in a column as JSON objects and expand them into multiple columns.
|
||||
|
||||
Bug fixes
|
||||
- Fixed a bug that crashed `set_column_types` in Python 3.5.2.
|
||||
- Fixed a bug that crashed when connecting to Datastore using an AML image.
|
||||
|
||||
### 2018-12-07 (version 0.5.3)
|
||||
|
||||
Fixed missing dependency issue for .NET Core2 on Ubuntu 16.
|
||||
|
||||
### 2018-12-03 (version 0.5.2)
|
||||
|
||||
Breaking changes
|
||||
- `SummaryFunction.N` was renamed to `SummaryFunction.Count`.
|
||||
|
||||
Bug fixes
|
||||
- Use the latest AML Run Token when reading from and writing to datastores on remote runs. Previously, if the AML Run Token was updated in Python, the Data Prep runtime was not updated with it.
|
||||
- Added clearer error messages
|
||||
- to_spark_dataframe() will no longer crash when Spark uses Kryo serialization
|
||||
- Value Count Inspector can now show more than 1000 unique values
|
||||
- Random Split no longer fails if the original Dataflow doesn’t have a name
|
||||
|
||||
### 2018-11-19 (version 0.5.0)
|
||||
|
||||
New features
|
||||
- Created a new DataPrep CLI to execute DataPrep packages and view the data profile for a dataset or dataflow
|
||||
- Redesigned SetColumnType API to improve usability
|
||||
- Renamed smart_read_file to auto_read_file
|
||||
- Now includes skew and kurtosis in the Data Profile
|
||||
- Can sample with stratified sampling
|
||||
- Can read from zip files that contain CSV files
|
||||
- Can split datasets row-wise with Random Split (e.g. into test-train sets)
|
||||
- Can get all the column data types from a dataflow or a data profile by calling .dtypes
|
||||
- Can get the row count from a dataflow or a data profile by calling .row_count
|
||||
|
||||
Bug fixes
|
||||
- Fixed long to double conversion
|
||||
- Fixed assert after any add column
|
||||
- Fixed an issue with FuzzyGrouping, where it would not detect groups in some cases
|
||||
- Fixed sort function to respect multi-column sort order
|
||||
- Fixed and/or expressions to be similar to how Pandas handles them
|
||||
- Fixed reading from dbfs path.
|
||||
- Made error messages more understandable
|
||||
- Now no longer fails when reading on remote compute target using AML token
|
||||
- Now no longer fails on Linux DSVM
|
||||
- Now no longer crashes when non-string values are in string predicates
|
||||
- Now handles assertion errors when Dataflow should fail correctly
|
||||
- Now supports dbutils mounted storage locations on Azure Databricks
|
||||
|
||||
### 2018-11-05 (version 0.4.0)
|
||||
|
||||
New features
|
||||
- Type Count added to Data Profile
|
||||
- Value Count and Histogram is now available
|
||||
- More percentiles in Data Profile
|
||||
- The Median is available in Summarize
|
||||
- Python 3.7 is now supported
|
||||
- When you save a dataflow that contains datastores to a Data Prep package, the datastore information will be persisted as part of the Data Prep package
|
||||
- Writing to datastore is now supported
|
||||
|
||||
Bug fixes
|
||||
- 64bit unsigned integer overflows are now handled properly on Linux
|
||||
- Fixed incorrect text label for plain text files in smart_read
|
||||
- String column type now shows up in metrics view
|
||||
- Type count is now fixed to show ValueKinds mapped to a single FieldType instead of individual ones
|
||||
- Write_to_csv no longer fails when path is provided as a string
|
||||
- When using Replace, leaving “find” blank will no longer fail
|
||||
|
||||
## Datasets License Information
|
||||
|
||||
IMPORTANT: Please read the notice and find out more about this NYC Taxi and Limousine Commission dataset here: http://www.nyc.gov/html/tlc/html/about/trip_record_data.shtml
|
||||
|
||||
IMPORTANT: Please read the notice and find out more about this Chicago Police Department dataset here: https://catalog.data.gov/dataset/crimes-2001-to-present-398a4
|
||||
@@ -0,0 +1,508 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Cleaning up New York Taxi Cab data\n",
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.<br>\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's use DataPrep to clean and featurize the data which can then be used to predict taxi trip duration. We will not use the For Hire Vehicle (FHV) datasets as they are not really taxi rides and they don't provide drop-off time and geo-coordinates."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import display\n",
|
||||
"from os import path\n",
|
||||
"from tempfile import mkdtemp\n",
|
||||
"\n",
|
||||
"import pandas as pd\n",
|
||||
"import azureml.dataprep as dprep"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's take a quick peek at yellow cab data and green cab data to see what the data looks like. DataPrep supports globing, so you will notice below that we have added a `*` in the path.\n",
|
||||
"\n",
|
||||
"*We are using a small sample of the taxi data for this demo. You can find a bigger sample ~6GB by changing \"green-small\" to \"green-sample\" and \"yellow-small\" to \"yellow-sample\" in the paths below.*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pd.set_option('display.max_columns', None)\n",
|
||||
"\n",
|
||||
"cache_location = mkdtemp()\n",
|
||||
"dataset_root = \"https://dprepdata.blob.core.windows.net/demo\"\n",
|
||||
"\n",
|
||||
"green_path = \"/\".join([dataset_root, \"green-small/*\"])\n",
|
||||
"yellow_path = \"/\".join([dataset_root, \"yellow-small/*\"])\n",
|
||||
"\n",
|
||||
"print(\"Retrieving data from the following two sources:\")\n",
|
||||
"print(green_path)\n",
|
||||
"print(yellow_path)\n",
|
||||
"\n",
|
||||
"green_df = dprep.read_csv(path=green_path, header=dprep.PromoteHeadersMode.GROUPED)\n",
|
||||
"yellow_df = dprep.auto_read_file(path=yellow_path)\n",
|
||||
"\n",
|
||||
"display(green_df.head(5))\n",
|
||||
"display(yellow_df.head(5))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Data Cleanup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's define some shortcut transforms that will apply to all Dataflows."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"all_columns = dprep.ColumnSelector(term=\".*\", use_regex=True)\n",
|
||||
"drop_if_all_null = [all_columns, dprep.ColumnRelationship(dprep.ColumnRelationship.ALL)]\n",
|
||||
"useful_columns = [\n",
|
||||
" \"cost\", \"distance\"\"distance\", \"dropoff_datetime\", \"dropoff_latitude\", \"dropoff_longitude\",\n",
|
||||
" \"passengers\", \"pickup_datetime\", \"pickup_latitude\", \"pickup_longitude\", \"store_forward\", \"vendor\"\n",
|
||||
"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's first work with the green taxi data and get it into a good shape that can then be combined with the yellow taxi data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tmp_df = (green_df\n",
" .replace_na(columns=all_columns)\n",
" .drop_nulls(*drop_if_all_null)\n",
" .rename_columns(column_pairs={\n",
" \"VendorID\": \"vendor\",\n",
" \"lpep_pickup_datetime\": \"pickup_datetime\",\n",
" \"Lpep_dropoff_datetime\": \"dropoff_datetime\",\n",
" \"lpep_dropoff_datetime\": \"dropoff_datetime\",\n",
" \"Store_and_fwd_flag\": \"store_forward\",\n",
" \"store_and_fwd_flag\": \"store_forward\",\n",
" \"Pickup_longitude\": \"pickup_longitude\",\n",
" \"Pickup_latitude\": \"pickup_latitude\",\n",
" \"Dropoff_longitude\": \"dropoff_longitude\",\n",
" \"Dropoff_latitude\": \"dropoff_latitude\",\n",
" \"Passenger_count\": \"passengers\",\n",
" \"Fare_amount\": \"cost\",\n",
" \"Trip_distance\": \"distance\"\n",
" })\n",
" .keep_columns(columns=useful_columns))\n",
"tmp_df.head(5)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"green_df = tmp_df"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's do the same thing to the yellow taxi data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tmp_df = (yellow_df\n",
" .replace_na(columns=all_columns)\n",
" .drop_nulls(*drop_if_all_null)\n",
" .rename_columns(column_pairs={\n",
" \"vendor_name\": \"vendor\",\n",
" \"VendorID\": \"vendor\",\n",
" \"vendor_id\": \"vendor\",\n",
" \"Trip_Pickup_DateTime\": \"pickup_datetime\",\n",
" \"tpep_pickup_datetime\": \"pickup_datetime\",\n",
" \"Trip_Dropoff_DateTime\": \"dropoff_datetime\",\n",
" \"tpep_dropoff_datetime\": \"dropoff_datetime\",\n",
" \"store_and_forward\": \"store_forward\",\n",
" \"store_and_fwd_flag\": \"store_forward\",\n",
" \"Start_Lon\": \"pickup_longitude\",\n",
" \"Start_Lat\": \"pickup_latitude\",\n",
" \"End_Lon\": \"dropoff_longitude\",\n",
" \"End_Lat\": \"dropoff_latitude\",\n",
" \"Passenger_Count\": \"passengers\",\n",
" \"passenger_count\": \"passengers\",\n",
" \"Fare_Amt\": \"cost\",\n",
" \"fare_amount\": \"cost\",\n",
" \"Trip_Distance\": \"distance\",\n",
" \"trip_distance\": \"distance\"\n",
" })\n",
" .keep_columns(columns=useful_columns))\n",
"tmp_df.head(5)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"yellow_df = tmp_df"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's now append the rows from the `yellow_df` to `green_df`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"combined_df = green_df.append_rows(dataflows=[yellow_df])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's take a look at the pickup and drop-off coordinates' data profile to see how the data is distributed."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"decimal_type = dprep.TypeConverter(data_type=dprep.FieldType.DECIMAL)\n",
"combined_df = combined_df.set_column_types(type_conversions={\n",
" \"pickup_longitude\": decimal_type,\n",
" \"pickup_latitude\": decimal_type,\n",
" \"dropoff_longitude\": decimal_type,\n",
" \"dropoff_latitude\": decimal_type\n",
"})\n",
"combined_df.keep_columns(columns=[\n",
" \"pickup_longitude\", \"pickup_latitude\", \n",
" \"dropoff_longitude\", \"dropoff_latitude\"\n",
"]).get_profile()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"From the data profile, we can see that some coordinates are missing and some fall outside New York City. Let's filter out coordinates outside the [city border](https://mapmakerapp.com?map=5b60a055a191245990310739f658)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tmp_df = (combined_df\n",
" .drop_nulls(\n",
" columns=[\"pickup_longitude\", \"pickup_latitude\", \"dropoff_longitude\", \"dropoff_latitude\"],\n",
" column_relationship=dprep.ColumnRelationship(dprep.ColumnRelationship.ANY)\n",
" ) \n",
" .filter(dprep.f_and(\n",
" dprep.col(\"pickup_longitude\") <= -73.72,\n",
" dprep.col(\"pickup_longitude\") >= -74.09,\n",
" dprep.col(\"pickup_latitude\") <= 40.88,\n",
" dprep.col(\"pickup_latitude\") >= 40.53,\n",
" dprep.col(\"dropoff_longitude\") <= -73.72,\n",
" dprep.col(\"dropoff_longitude\") >= -74.09,\n",
" dprep.col(\"dropoff_latitude\") <= 40.88,\n",
" dprep.col(\"dropoff_latitude\") >= 40.53\n",
" )))\n",
"tmp_df.keep_columns(columns=[\n",
" \"pickup_longitude\", \"pickup_latitude\", \n",
" \"dropoff_longitude\", \"dropoff_latitude\"\n",
"]).get_profile()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"combined_df = tmp_df"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's take a look at the data profile for the `store_forward` column."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"combined_df.keep_columns(columns='store_forward').get_profile()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"From the data profile of `store_forward` above, we can see that the values are inconsistent and that some are missing. Let's fix both."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"combined_df = combined_df.replace(columns=\"store_forward\", find=\"0\", replace_with=\"N\").fill_nulls(\"store_forward\", \"N\")"
]
},
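{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an optional sanity check (an addition, not part of the original flow), we can profile `store_forward` again with the same `get_profile` call as above to confirm the values are now consistent."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Re-profile after the replace/fill_nulls fix; expect only consistent values now.\n",
"combined_df.keep_columns(columns='store_forward').get_profile()"
]
},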
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's now split the pickup and drop-off datetimes into a date column and a time column each. We will use `split_column_by_example` to perform the split. If the `example` parameter of `split_column_by_example` is omitted, the library will automatically infer where to split based on the data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tmp_df = (combined_df\n",
" .split_column_by_example(source_column=\"pickup_datetime\")\n",
" .split_column_by_example(source_column=\"dropoff_datetime\"))\n",
"tmp_df.head(5)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"combined_df = tmp_df"
]
},
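{
"cell_type": "markdown",
"metadata": {},
"source": [
"For illustration only: a hedged sketch of passing `example` explicitly instead of letting the library infer the split. The value shape shown (a source value paired with its split parts) is an assumption, mirroring the `example_data` pairs used with `derive_column_by_example` below; check the azureml.dataprep reference for the precise signature before using it."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical sketch (unverified signature), kept commented out on purpose:\n",
"# tmp_df = combined_df.split_column_by_example(\n",
"#     source_column=\"pickup_datetime\",\n",
"#     example=(\"2013-08-22 12:34:56\", [\"2013-08-22\", \"12:34:56\"]))"
]
},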
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's rename the columns generated by `split_column_by_example` to meaningful names."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tmp_df = (combined_df\n",
" .rename_columns(column_pairs={\n",
" \"pickup_datetime_1\": \"pickup_date\",\n",
" \"pickup_datetime_2\": \"pickup_time\",\n",
" \"dropoff_datetime_1\": \"dropoff_date\",\n",
" \"dropoff_datetime_2\": \"dropoff_time\"\n",
" }))\n",
"tmp_df.head(5)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"combined_df = tmp_df"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Feature Engineering"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Datetime features"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's split the pickup and drop-off dates further into day of week, day of month, and month. For the pickup and drop-off time columns, we will split them into hour, minute, and second."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tmp_df = (combined_df\n",
" .derive_column_by_example(\n",
" source_columns=\"pickup_date\", \n",
" new_column_name=\"pickup_weekday\", \n",
" example_data=[(\"2009-01-04\", \"Sunday\"), (\"2013-08-22\", \"Thursday\")]\n",
" )\n",
" .derive_column_by_example(\n",
" source_columns=\"dropoff_date\",\n",
" new_column_name=\"dropoff_weekday\",\n",
" example_data=[(\"2013-08-22\", \"Thursday\"), (\"2013-11-03\", \"Sunday\")]\n",
" )\n",
" .split_column_by_example(source_column=\"pickup_date\")\n",
" .split_column_by_example(source_column=\"pickup_time\")\n",
" .split_column_by_example(source_column=\"dropoff_date\")\n",
" .split_column_by_example(source_column=\"dropoff_time\")\n",
" .split_column_by_example(source_column=\"pickup_time_1\")\n",
" .split_column_by_example(source_column=\"dropoff_time_1\")\n",
" .drop_columns(columns=[\n",
" \"pickup_date\", \"pickup_time\", \"dropoff_date\", \"dropoff_time\", \n",
" \"pickup_date_1\", \"dropoff_date_1\", \"pickup_time_1\", \"dropoff_time_1\"\n",
" ])\n",
" .rename_columns(column_pairs={\n",
" \"pickup_date_2\": \"pickup_month\",\n",
" \"pickup_date_3\": \"pickup_monthday\",\n",
" \"pickup_time_1_1\": \"pickup_hour\",\n",
" \"pickup_time_1_2\": \"pickup_minute\",\n",
" \"pickup_time_2\": \"pickup_second\",\n",
" \"dropoff_date_2\": \"dropoff_month\",\n",
" \"dropoff_date_3\": \"dropoff_monthday\",\n",
" \"dropoff_time_1_1\": \"dropoff_hour\",\n",
" \"dropoff_time_1_2\": \"dropoff_minute\",\n",
" \"dropoff_time_2\": \"dropoff_second\"\n",
" }))\n",
"tmp_df.head(5)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"combined_df = tmp_df"
]
},
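{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an optional check (an addition to the original flow), we can profile a few of the derived columns to confirm the weekday and hour values look reasonable."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Profile only the derived columns created by the transforms above.\n",
"combined_df.keep_columns(columns=[\n",
" \"pickup_weekday\", \"pickup_hour\", \"dropoff_weekday\", \"dropoff_hour\"\n",
"]).get_profile()"
]
},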
{
"cell_type": "markdown",
"metadata": {},
"source": [
"From the data above, we can see that the pickup and drop-off date and time components produced by the transforms look good. Let's drop the `pickup_datetime` and `dropoff_datetime` columns as they are no longer needed."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tmp_df = combined_df.drop_columns(columns=[\"pickup_datetime\", \"dropoff_datetime\"])\n",
"tmp_df.head(5)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"combined_df = tmp_df"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's now save the transformation steps into a Dataflow (`.dprep`) file so we can run the same steps on Spark."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from os import path\n",
"from tempfile import mkdtemp\n",
"\n",
"dflow_path = path.join(mkdtemp(), \"new_york_taxi.dprep\")\n",
"combined_df.save(file_path=dflow_path)"
]
},
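{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick round-trip check (an optional addition), the saved Dataflow can be re-opened from the same path using the same `Dataflow.open` call the scale-out notebook uses."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Reload the Dataflow we just saved and preview it to confirm the save round-trips.\n",
"reloaded_df = dprep.Dataflow.open(dflow_path)\n",
"reloaded_df.head(5)"
]
}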
],
"metadata": {
"authors": [
{
"name": "sihhu"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,129 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Scale-Out Data Preparation\n",
"Copyright (c) Microsoft Corporation. All rights reserved.<br>\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once we are done preparing and featurizing the data locally, we can run the same steps on the full dataset in scale-out mode. The New York taxi cab data is about 300 GB in total, which makes it a good candidate for scale-out processing. Let's start by downloading the Dataflow we saved earlier. Feel free to run the `new_york_taxi_cab.ipynb` notebook to generate the Dataflow yourself, in which case you may comment out the download code and set `dflow_path` to where the Dataflow is saved."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tempfile import mkdtemp\n",
"from os import path\n",
"from urllib.request import urlretrieve\n",
"\n",
"dflow_root = mkdtemp()\n",
"dflow_path = path.join(dflow_root, \"new_york_taxi.dprep\")\n",
"print(\"Downloading Dataflow to: {}\".format(dflow_path))\n",
"urlretrieve(\"https://dprepdata.blob.core.windows.net/demo/new_york_taxi_v2.dprep\", dflow_path)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's load the Dataflow we just downloaded."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.dataprep as dprep\n",
"\n",
"df = dprep.Dataflow.open(dflow_path)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's replace the datasources with the full dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from uuid import uuid4\n",
"\n",
"other_step = df._get_steps()[7].arguments['dataflows'][0]['anonymousSteps'][0]\n",
"other_step['id'] = str(uuid4())\n",
"other_step['arguments']['path']['target'] = 1\n",
"other_step['arguments']['path']['resourceDetails'][0]['path'] = 'https://wranglewestus.blob.core.windows.net/nyctaxi/yellow_tripdata*'"
]
},
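{
"cell_type": "markdown",
"metadata": {},
"source": [
"A note on the cell above (added commentary): the patch targets what appears to be the serialized `append_rows` step, whose embedded yellow-taxi source `replace_datasource` cannot reach. It gives the anonymous step a fresh id and points its path at the full yellow trip data. This relies on the private `_get_steps` API, so treat it as a workaround. The green-taxi source is a regular datasource and can be replaced directly, as shown next."
]
},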
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"green_dsource = dprep.BlobDataSource(\"https://wranglewestus.blob.core.windows.net/nyctaxi/green_tripdata*\")\n",
"df = df.replace_datasource(green_dsource)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once we have replaced the datasources, we can run the same steps on the full dataset. To verify, we will pull the first 5 rows into a pandas DataFrame. Since we are now running against the full dataset, this might take a little while depending on your Spark cluster's size."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"preview_df = df.take(5).to_pandas_dataframe()\n",
"preview_df.head(5)"
]
},
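{
"cell_type": "markdown",
"metadata": {},
"source": [
"To execute the pipeline on the cluster itself rather than pulling rows back locally, a Dataflow can also be materialized as a Spark DataFrame. This is an added sketch, assuming an active Spark session is available to azureml.dataprep; see the `to_spark_dataframe` reference for details."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch (assumes a Spark session is available in this environment):\n",
"# materialize the full, transformed dataset as a Spark DataFrame.\n",
"spark_df = df.to_spark_dataframe()\n",
"spark_df.printSchema()"
]
}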
],
"metadata": {
"authors": [
{
"name": "sihhu"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.4"
},
"skip_execute_as_test": true
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,45 @@
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDmkkyF0BwipZow
Wd1AMkRkySx0y079JPxpsYhv4i1xXKdoa9bpFqwoXmJpeQM1JWnU4UeZzFeM86qK
AhQvL4KV4kibcP2ENvu2NKFEdotO3uxPJ+6GlcYwMYzy+tUj008KnnRZfTrR78sJ
tIl3C6lnVL0ICihksG59P1sskRq3PvOjXLAdEZalwDjZ4ZPoNDZdj6nUjB2l8zqu
pKAt5mR+bJ9Sox4yrDuNhMmFt5QsRDRe3wUqdV+C9OCWHmjlmsjrYw7p9YmjBDvC
5U7mF0Mk/XeYFzj0pkXKQVqBL6xqig+q5ob0szYfg19iDeFhS3iIsRcJGEnRVW/A
NpsBZyKrAgMBAAECggEBANlvP8C1F8NInhZYuIAwpzTQTh86Fxw8g9h8dijkh2wv
LyQXBk07d1B+aZoDZ5X32UzKwcX04N9obfvFqBkzWZdVFJmZvUmwvEEActBoZkkT
io+/HX5HweVy5PPCvbsSK6jc8uXtZcnSs4tMeJIOKkvqqnTpd1w00Y1FcQqfMC16
4p7o8wbt6OFoFAYqcxeVYVwDzCTLZD3+iJaqmntkBkoDndJy52yXQmMq5z1wbQVp
BL6+L9nTvmouy64jiHVSKOx8nnWThYfHsXoPv+rYywjeuK/v3hyaTAwogs36ooEn
SnuTBRvJcumN9Q0XIVlxKMVBcGyyAP+0yNKGz5NQgdECgYEA/I/Uq1E3epPJgEWR
Bub+LpCgwtrw/lgKncb/Q/AiE9qoXobUe4KNU8aGaNMb7uVNLckY7cOluLS6SQb3
Mzwk2Jl0G3vk8rW46tZWvSYB8+zAR2Rz7seUOT9SE5OmvwpnHrnp3nRr1vvVd2bp
Q/ypwMLrwWQN51Kr+oTS74bUbrkCgYEA6bXVIUyao7z2Q3qAr6h+6JEWDbkJA7hJ
BjHIOXvxd1tMoJJX+X9+IE/2XoJaUkGCb0vrM/hi1cyQFmS4Or/J6IWSZu8oBpDr
EBmIK3PF1nrzNvWD28wM46c6ScehyWSm/u4bJWSm9liTX3dv5Kpa6ym7yLKc3c0B
ECpSJM+5SoMCgYEAq585Tukzn/IJPUcIk/4nv5C8DW0l0lAVdr2g/JOTNJajTwik
HwHJ86G1+Elsc9wRpAlBDWCjnm4BIFrBZGl8SEuOoJaCL4PZEotwCbxoG09IIbtb
JGkuifBDX9Y3ux3gkPqYt3e5SC99EVQ3MuHgoIJUHehVolmFUAkuJWIjvNECgYEA
5pU0VspRuELzZdgzpxvDOooLDDcHodfslGQBfFXBA1Xc4IACtHMJaa/7D3vkyUtA
+bYZtQjX2sEdWDq/WZdoCjXfIBfNkczhXt0R8G0lQFvGIu9QzUchYGrZo3mHMkBQ
Uy1xMw9/e4YgwQwCJcW+Nk7Sq00uX9enuN9IdHFOCykCgYAqAGMK6CH1tlpjvHrf
k+ZhigYxTXBlsVVvK1BIGGaiwzDpn65zeQp4aLOjSZkI1LuRi3tfTiZ321jRd64J
4lGk5Jurqv5grDmxROX/U50wEYbI9ncu/thU7syUdxDiqxHPI2RMG50mRcm3a55p
ZCNSqkMlcXyA0U1z8C1ILNUsbA==
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIICoTCCAYkCAgPoMA0GCSqGSIb3DQEBBQUAMBQxEjAQBgNVBAMMCUNMSS1Mb2dp
bjAiGA8yMDE4MDcxMzIzMjA0N1oYDzIwMTkwNzEzMjMyMDQ5WjAUMRIwEAYDVQQD
DAlDTEktTG9naW4wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDmkkyF
0BwipZowWd1AMkRkySx0y079JPxpsYhv4i1xXKdoa9bpFqwoXmJpeQM1JWnU4UeZ
zFeM86qKAhQvL4KV4kibcP2ENvu2NKFEdotO3uxPJ+6GlcYwMYzy+tUj008KnnRZ
fTrR78sJtIl3C6lnVL0ICihksG59P1sskRq3PvOjXLAdEZalwDjZ4ZPoNDZdj6nU
jB2l8zqupKAt5mR+bJ9Sox4yrDuNhMmFt5QsRDRe3wUqdV+C9OCWHmjlmsjrYw7p
9YmjBDvC5U7mF0Mk/XeYFzj0pkXKQVqBL6xqig+q5ob0szYfg19iDeFhS3iIsRcJ
GEnRVW/ANpsBZyKrAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAI4VlaFb9NsXMLdT
Cw5/pk0Xo2Qi6483RGTy8vzrw88IE7f3juB/JWG+rayjtW5bBRx2fae4/ZIdZ4zg
N2FDKn2PQPAc9m9pcKyUKUvWOC8ixSkrUmeQew0l1AXU0hsPSlJ7/7ZK4efoyB47
hj71fsyKdyKbisZDcUFBq/S8PazdPF0YOD1W/4A2tW0cSMg+jmFWynuUTdWt3SU8
CwBGqdiSKT5faJuYwIWnRXDEQS3ObRn1OFEfFdd4d2sxjxydWKRgnINnGlBdiFAT
KzCozVr+75cO2ErH6x5C0hLQGG5BxXbaijyxyvaRNokTMVVv6OaDEnjzCGfJ72Yf
2wgitNc=
-----END CERTIFICATE-----
@@ -0,0 +1,54 @@
"Retrieved from https://en.wikipedia.org/wiki/Chicago_City_Council on November 6, 2018"


Ward,Name,Took Office,Party
1,Proco Joe Moreno,2010*,Dem
2,Brian Hopkins,2015,Dem
3,Pat Dowell,2007,Dem
4,Sophia King,2016*,Dem
5,Leslie Hairston,1999,Dem
6,Roderick Sawyer,2011,Dem
7,Gregory Mitchell,2015,Dem
8,Michelle A. Harris,2006*,Dem
9,Anthony Beale,1999,Dem
10,Susie Sadlowski Garza,2015,Dem
11,Patrick Daley Thompson,2015,Dem
12,George Cardenas,2003,Dem
13,Marty Quinn,2011,Dem
14,Edward M. Burke,1969,Dem
15,Raymond Lopez,2015,Dem
16,Toni Foulkes,2007,Dem
17,David H. Moore,2015,Dem
18,Derrick Curtis,2015,Dem
19,Matthew O'Shea,2011,Dem
20,Willie Cochran,2007,Dem
21,Howard Brookins Jr.,2003,Dem
22,Ricardo Muñoz,1993*,Dem
23,Silvana Tabares,2018*,Dem
24,"Michael Scott, Jr.",2015,Dem
25,Daniel Solis,1996*,Dem
26,Roberto Maldonado,2009*,Dem
27,"Walter Burnett, Jr.",1995,Dem
28,Jason Ervin,2011*,Dem
29,Chris Taliaferro,2015,Dem
30,Ariel Reboyras,2003,Dem
31,Milly Santiago,2015,Dem
32,Scott Waguespack,2007,Dem
33,Deb Mell,2013*,Dem
34,Carrie Austin,1994*,Dem
35,Carlos Ramirez-Rosa,2015,Dem
36,Gilbert Villegas,2015,Dem
37,Emma Mitts,2000*,Dem
38,Nicholas Sposato,2011,Ind
39,Margaret Laurino,1994*,Dem
40,Patrick J. O'Connor,1983,Dem
41,Anthony Napolitano,2015,Rep
42,Brendan Reilly,2007,Dem
43,Michele Smith,2011,Dem
44,Thomas M. Tunney,2002*,Dem
45,John Arena,2011,Dem
46,James Cappleman,2011,Dem
47,Ameya Pawar,2011,Dem
48,Harry Osterman,2011,Dem
49,Joe Moore,1991,Dem
50,Debra Silverstein,2011,Dem
@@ -0,0 +1,15 @@
File updated 11/2/2018



ID|Case Number|Date|Block|IUCR|Primary Type|Description|Location Description|Arrest|Domestic|Beat|District|Ward|Community Area|FBI Code|X Coordinate|Y Coordinate|Year|Updated On|Latitude|Longitude|Location
10140490|HY329907|07/05/2015 11:50:00 PM|050XX N NEWLAND AVE|0820|THEFT|$500 AND UNDER|STREET|false|false|1613|016|41|10|06|1129230|1933315|2015|07/12/2015 12:42:46 PM|41.973309466|-87.800174996|(41.973309466, -87.800174996)
10139776|HY329265|07/05/2015 11:30:00 PM|011XX W MORSE AVE|0460|BATTERY|SIMPLE|STREET|false|true|2431|024|49|1|08B|1167370|1946271|2015|07/12/2015 12:42:46 PM|42.008124017|-87.65955018|(42.008124017, -87.65955018)
10140270|HY329253|07/05/2015 11:20:00 PM|121XX S FRONT AVE|0486|BATTERY|DOMESTIC BATTERY SIMPLE|STREET|false|true|0532||9|53|08B|||2015|07/12/2015 12:42:46 PM|||
10139885|HY329308|07/05/2015 11:19:00 PM|051XX W DIVISION ST|0610|BURGLARY|FORCIBLE ENTRY|SMALL RETAIL STORE|false|false|1531|015|37|25|05|1141721|1907465|2015|07/12/2015 12:42:46 PM|41.902152027|-87.754883404|(41.902152027, -87.754883404)
10140379|HY329556|07/05/2015 11:00:00 PM|012XX W LAKE ST|0930|MOTOR VEHICLE THEFT|THEFT/RECOVERY: AUTOMOBILE|STREET|false|false|1215|012|27|28|07|1168413|1901632|2015|07/12/2015 12:42:46 PM|41.885610142|-87.657008701|(41.885610142, -87.657008701)
10140868|HY330421|07/05/2015 10:54:00 PM|118XX S PEORIA ST|1320|CRIMINAL DAMAGE|TO VEHICLE|VEHICLE NON-COMMERCIAL|false|false|0524|005|34|53|14|1172409|1826485|2015|07/12/2015 12:42:46 PM|41.6793109|-87.644545209|(41.6793109, -87.644545209)
10139762|HY329232|07/05/2015 10:42:00 PM|026XX W 37TH PL|1020|ARSON|BY FIRE|VACANT LOT/LAND|false|false|0911|009|12|58|09|1159436|1879658|2015|07/12/2015 12:42:46 PM|41.825500607|-87.690578042|(41.825500607, -87.690578042)
10139722|HY329228|07/05/2015 10:30:00 PM|016XX S CENTRAL PARK AVE|1811|NARCOTICS|POSS: CANNABIS 30GMS OR LESS|ALLEY|true|false|1021|010|24|29|18|1152687|1891389|2015|07/12/2015 12:42:46 PM|41.857827814|-87.715028789|(41.857827814, -87.715028789)
10139774|HY329209|07/05/2015 10:15:00 PM|048XX N ASHLAND AVE|1310|CRIMINAL DAMAGE|TO PROPERTY|APARTMENT|false|false|2032|020|46|3|14|1164821|1932394|2015|07/12/2015 12:42:46 PM|41.970099796|-87.669324377|(41.970099796, -87.669324377)
10139697|HY329177|07/05/2015 10:10:00 PM|058XX S ARTESIAN AVE|1320|CRIMINAL DAMAGE|TO VEHICLE|ALLEY|false|false|0824|008|16|63|14|1160997|1865851|2015|07/12/2015 12:42:46 PM|41.787580282|-87.685233078|(41.787580282, -87.685233078)
@@ -0,0 +1,11 @@
ID,Case Number,Date,Block,IUCR,Primary Type,Description,Location Description,Arrest,Domestic,Beat,District,Ward,Community Area,FBI Code,X Coordinate,Y Coordinate,Year,Updated On,Latitude,Longitude,Location
10498554,HZ239907,4/4/2016 23:56,007XX E 111TH ST,1153,DECEPTIVE PRACTICE,FINANCIAL IDENTITY THEFT OVER $ 300,OTHER,FALSE,FALSE,531,5,9,50,11,1183356,1831503,2016,5/11/2016 15:48,41.69283384,-87.60431945,"(41.692833841, -87.60431945)"
10516598,HZ258664,4/15/2016 17:00,082XX S MARSHFIELD AVE,890,THEFT,FROM BUILDING,RESIDENCE,FALSE,FALSE,614,6,21,71,6,1166776,1850053,2016,5/12/2016 15:48,41.74410697,-87.66449429,"(41.744106973, -87.664494285)"
10519196,HZ261252,4/15/2016 10:00,104XX S SACRAMENTO AVE,1154,DECEPTIVE PRACTICE,FINANCIAL IDENTITY THEFT $300 AND UNDER,RESIDENCE,FALSE,FALSE,2211,22,19,74,11,,,2016,5/12/2016 15:50,,,
10519591,HZ261534,4/15/2016 9:00,113XX S PRAIRIE AVE,1120,DECEPTIVE PRACTICE,FORGERY,RESIDENCE,FALSE,FALSE,531,5,9,49,10,,,2016,5/13/2016 15:51,,,
10534446,HZ277630,4/15/2016 10:00,055XX N KEDZIE AVE,890,THEFT,FROM BUILDING,"SCHOOL, PUBLIC, BUILDING",FALSE,FALSE,1712,17,40,13,6,,,2016,5/25/2016 15:59,,,
10535059,HZ278872,4/15/2016 4:30,004XX S KILBOURN AVE,810,THEFT,OVER $500,RESIDENCE,FALSE,FALSE,1131,11,24,26,6,,,2016,5/25/2016 15:59,,,
10499802,HZ240778,4/15/2016 10:00,010XX N MILWAUKEE AVE,1152,DECEPTIVE PRACTICE,ILLEGAL USE CASH CARD,RESIDENCE,FALSE,FALSE,1213,12,27,24,11,,,2016,5/27/2016 15:45,,,
10522293,HZ264802,4/15/2016 16:00,019XX W DIVISION ST,1110,DECEPTIVE PRACTICE,BOGUS CHECK,RESTAURANT,FALSE,FALSE,1424,14,1,24,11,1163094,1908003,2016,5/16/2016 15:48,41.90320604,-87.67636193,"(41.903206037, -87.676361925)"
10523111,HZ265911,4/15/2016 8:00,061XX N SHERIDAN RD,1153,DECEPTIVE PRACTICE,FINANCIAL IDENTITY THEFT OVER $ 300,RESIDENCE,FALSE,FALSE,2433,24,48,77,11,,,2016,5/16/2016 15:50,,,
10525877,HZ268138,4/15/2016 15:00,023XX W EASTWOOD AVE,1153,DECEPTIVE PRACTICE,FINANCIAL IDENTITY THEFT OVER $ 300,,FALSE,FALSE,1911,19,47,4,11,,,2016,5/18/2016 15:50,,,
@@ -0,0 +1,11 @@
ID,Case Number,Date,Block,IUCR,Primary Type,Description,Location Description,Arrest,Domestic,Beat,District,Ward,Community Area,FBI Code,X Coordinate,Y Coordinate,Year,Updated On,Latitude,Longitude,Location
10378283,HZ114126,1/10/2016 11:00,033XX W IRVING PARK RD,610,BURGLARY,FORCIBLE ENTRY,RESIDENCE-GARAGE,TRUE,FALSE,1724,17,33,16,5,1153593,1926401,2016,5/22/2016 15:51,41.95388599,-87.71077048,"(41.95388599, -87.710770479)"
10382154,HZ118288,1/10/2016 21:00,055XX S FRANCISCO AVE,1754,OFFENSE INVOLVING CHILDREN,AGG SEX ASSLT OF CHILD FAM MBR,RESIDENCE,FALSE,TRUE,824,8,14,63,2,1157983,1867874,2016,6/1/2016 15:51,41.79319349,-87.69622926,"(41.793193489, -87.696229255)"
10374287,HZ110730,1/10/2016 11:50,043XX W ARMITAGE AVE,5002,OTHER OFFENSE,OTHER VEHICLE OFFENSE,STREET,FALSE,TRUE,2522,25,30,20,26,1146917,1912931,2016,6/7/2016 15:55,41.91705356,-87.73565764,"(41.917053561, -87.735657637)"
10374662,HZ110403,1/10/2016 1:30,073XX S CLAREMONT AVE,497,BATTERY,AGGRAVATED DOMESTIC BATTERY: OTHER DANG WEAPON,STREET,FALSE,TRUE,835,8,18,66,04B,1162007,1855951,2016,2/4/2016 15:44,41.76039236,-87.68180481,"(41.760392356, -87.681804812)"
10374720,HZ110836,1/10/2016 7:30,079XX S RHODES AVE,890,THEFT,FROM BUILDING,OTHER,FALSE,FALSE,624,6,6,44,6,1181279,1852568,2016,2/4/2016 15:44,41.75068679,-87.61127681,"(41.75068679, -87.611276811)"
10375178,HZ110832,1/10/2016 14:20,057XX S KEDZIE AVE,460,BATTERY,SIMPLE,RESTAURANT,FALSE,FALSE,824,8,14,63,08B,1156029,1866379,2016,2/4/2016 15:44,41.78913051,-87.7034346,"(41.78913051, -87.703434602)"
10398695,HZ135279,1/10/2016 23:00,031XX S PARNELL AVE,620,BURGLARY,UNLAWFUL ENTRY,RESIDENCE-GARAGE,FALSE,FALSE,915,9,11,60,5,1173138,1884117,2016,2/4/2016 15:44,41.8374442,-87.64017699,"(41.837444199, -87.640176991)"
10402270,HZ138745,1/10/2016 11:00,051XX S ELIZABETH ST,620,BURGLARY,UNLAWFUL ENTRY,APARTMENT,FALSE,FALSE,934,9,16,61,5,,,2016,2/4/2016 6:53,,,
10380619,HZ116583,1/10/2016 9:41,091XX S PAXTON AVE,4387,OTHER OFFENSE,VIOLATE ORDER OF PROTECTION,RESIDENCE,TRUE,TRUE,413,4,7,48,26,1192434,1844707,2016,2/2/2016 15:56,41.72885134,-87.57065553,"(41.728851343, -87.570655525)"
10400131,HZ136171,1/10/2016 18:00,0000X W TERMINAL ST,810,THEFT,OVER $500,AIRPORT BUILDING NON-TERMINAL - SECURE AREA,FALSE,FALSE,1651,16,41,76,6,,,2016,2/2/2016 15:58,,,