Compare commits
39 Commits
azureml-sd
...
azureml-sd
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
41d697e298 | ||
|
|
c3ce932029 | ||
|
|
a956162114 | ||
|
|
cb5a178e40 | ||
|
|
d81c336c59 | ||
|
|
4244a24d81 | ||
|
|
3b488555e5 | ||
|
|
6abc478f33 | ||
|
|
666c2579eb | ||
|
|
5af3aa4231 | ||
|
|
e48d828ab0 | ||
|
|
44aa636c21 | ||
|
|
4678f9adc3 | ||
|
|
5bf85edade | ||
|
|
94f381e884 | ||
|
|
ea1b7599c3 | ||
|
|
6b8a6befde | ||
|
|
c1511b7b74 | ||
|
|
8f007a3333 | ||
|
|
5ad3ca00e8 | ||
|
|
556a41e223 | ||
|
|
407b8929d0 | ||
|
|
18a11bbd8d | ||
|
|
8b439a9f7c | ||
|
|
75c393a221 | ||
|
|
be7176fe06 | ||
|
|
7b41675355 | ||
|
|
fa7685f6fa | ||
|
|
6b444b1467 | ||
|
|
c9767473ae | ||
|
|
648b48fc0c | ||
|
|
04db5d93e2 | ||
|
|
4e10935701 | ||
|
|
f737db499d | ||
|
|
6b66da1558 | ||
|
|
8647aea9d9 | ||
|
|
3ee2dc3258 | ||
|
|
9f7c4ce668 | ||
|
|
036ca6ac75 |
29
Dockerfiles/1.0.17/Dockerfile
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
# Azure ML SDK sample-notebook image, SDK version 1.0.17.
# Builds a Jupyter server pre-loaded with the matching sample notebooks.
FROM continuumio/miniconda:4.5.11

# Install git (needed below to clone the sample notebooks).
# Update + install + cleanup in one layer; the blanket `apt-get upgrade`
# was removed (hadolint DL3005) — bump the base image tag instead.
RUN apt-get update \
    && apt-get install -y --no-install-recommends git \
    && rm -rf /var/lib/apt/lists/*

# Create a new conda environment named "azureml" with Python 3.6.
RUN conda create -n azureml -y -q python=3.6

# Install additional packages used by the sample notebooks. This is optional.
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# Install the azureml-sdk components, pinned to this image's SDK version.
RUN ["/bin/bash", "-c", "source activate azureml && pip install --no-cache-dir azureml-sdk[notebooks]==1.0.17"]

# Clone the Azure ML GitHub sample notebooks at the branch matching the SDK version.
RUN git clone -b "azureml-sdk-1.0.17" --single-branch https://github.com/Azure/MachineLearningNotebooks.git /home/MachineLearningNotebooks

# Generate the Jupyter configuration file.
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# Set an empty token for Jupyter to remove authentication.
# This is NOT recommended for a production environment.
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# Document the Jupyter port (EXPOSE does not publish it by itself).
EXPOSE 8887

# Start from the notebooks directory instead of `cd` inside CMD.
WORKDIR /home/MachineLearningNotebooks

# Start the Jupyter notebook server on port 8887 when the container starts.
CMD ["/bin/bash", "-c", "source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"]
|
||||||
29
Dockerfiles/1.0.18/Dockerfile
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
# Azure ML SDK sample-notebook image, SDK version 1.0.18.
# Builds a Jupyter server pre-loaded with the matching sample notebooks.
FROM continuumio/miniconda:4.5.11

# Install git (needed below to clone the sample notebooks).
# Update + install + cleanup in one layer; the blanket `apt-get upgrade`
# was removed (hadolint DL3005) — bump the base image tag instead.
RUN apt-get update \
    && apt-get install -y --no-install-recommends git \
    && rm -rf /var/lib/apt/lists/*

# Create a new conda environment named "azureml" with Python 3.6.
RUN conda create -n azureml -y -q python=3.6

# Install additional packages used by the sample notebooks. This is optional.
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# Install the azureml-sdk components, pinned to this image's SDK version.
RUN ["/bin/bash", "-c", "source activate azureml && pip install --no-cache-dir azureml-sdk[notebooks]==1.0.18"]

# Clone the Azure ML GitHub sample notebooks at the branch matching the SDK version.
RUN git clone -b "azureml-sdk-1.0.18" --single-branch https://github.com/Azure/MachineLearningNotebooks.git /home/MachineLearningNotebooks

# Generate the Jupyter configuration file.
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# Set an empty token for Jupyter to remove authentication.
# This is NOT recommended for a production environment.
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# Document the Jupyter port (EXPOSE does not publish it by itself).
EXPOSE 8887

# Start from the notebooks directory instead of `cd` inside CMD.
WORKDIR /home/MachineLearningNotebooks

# Start the Jupyter notebook server on port 8887 when the container starts.
CMD ["/bin/bash", "-c", "source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"]
|
||||||
29
Dockerfiles/1.0.21/Dockerfile
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
# Azure ML SDK sample-notebook image, SDK version 1.0.21.
# Builds a Jupyter server pre-loaded with the matching sample notebooks.
FROM continuumio/miniconda:4.5.11

# Install git (needed below to clone the sample notebooks).
# Update + install + cleanup in one layer; the blanket `apt-get upgrade`
# was removed (hadolint DL3005) — bump the base image tag instead.
RUN apt-get update \
    && apt-get install -y --no-install-recommends git \
    && rm -rf /var/lib/apt/lists/*

# Create a new conda environment named "azureml" with Python 3.6.
RUN conda create -n azureml -y -q python=3.6

# Install additional packages used by the sample notebooks. This is optional.
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# Install the azureml-sdk components, pinned to this image's SDK version.
RUN ["/bin/bash", "-c", "source activate azureml && pip install --no-cache-dir azureml-sdk[notebooks]==1.0.21"]

# Clone the Azure ML GitHub sample notebooks at the branch matching the SDK version.
RUN git clone -b "azureml-sdk-1.0.21" --single-branch https://github.com/Azure/MachineLearningNotebooks.git /home/MachineLearningNotebooks

# Generate the Jupyter configuration file.
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# Set an empty token for Jupyter to remove authentication.
# This is NOT recommended for a production environment.
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# Document the Jupyter port (EXPOSE does not publish it by itself).
EXPOSE 8887

# Start from the notebooks directory instead of `cd` inside CMD.
WORKDIR /home/MachineLearningNotebooks

# Start the Jupyter notebook server on port 8887 when the container starts.
CMD ["/bin/bash", "-c", "source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"]
|
||||||
@@ -96,7 +96,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"import azureml.core\n",
|
"import azureml.core\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print(\"This notebook was created using version 1.0.17 of the Azure ML SDK\")\n",
|
"print(\"This notebook was created using version 1.0.21 of the Azure ML SDK\")\n",
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -336,7 +336,7 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"In this notebook you configured this notebook library to connect easily to an Azure ML workspace. You can copy this notebook to your own libraries to connect them to you workspace, or use it to bootstrap new workspaces completely.\n",
|
"In this notebook you configured this notebook library to connect easily to an Azure ML workspace. You can copy this notebook to your own libraries to connect them to you workspace, or use it to bootstrap new workspaces completely.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"If you came here from another notebook, you can return there and complete that exercise, or you can try out the [Tutorials](./tutorials) or jump into \"how-to\" notebooks and start creating and deploying models. A good place to start is the [train in notebook](./how-to-use-azureml/training/train-in-notebook) example that walks through a simplified but complete end to end machine learning process."
|
"If you came here from another notebook, you can return there and complete that exercise, or you can try out the [Tutorials](./tutorials) or jump into \"how-to\" notebooks and start creating and deploying models. A good place to start is the [train within notebook](./how-to-use-azureml/training/train-within-notebook) example that walks through a simplified but complete end to end machine learning process."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
305
contrib/RAPIDS/README.md
Normal file
@@ -0,0 +1,305 @@
|
|||||||
|
## How to use the RAPIDS on AzureML materials
|
||||||
|
### Setting up requirements
|
||||||
|
The material requires the use of the Azure ML SDK and of the Jupyter Notebook Server to run the interactive execution. Please refer to instructions to [setup the environment.](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#local "Local Computer Set Up") Follow the instructions under **Local Computer**, make sure to run the last step: <span style="font-family: Courier New;">pip install \<new package\></span> with <span style="font-family: Courier New;">new package = progressbar2 (pip install progressbar2)</span>
|
||||||
|
|
||||||
|
After following the directions, the user should end up setting up a conda environment (<span style="font-family: Courier New;">myenv</span>) that can be activated in an Anaconda prompt
|
||||||
|
|
||||||
|
The user would also require an Azure Subscription with a Machine Learning Services quota on the desired region for 24 nodes or more (to be able to select a vmSize with 4 GPUs as it is used on the Notebook) on the desired VM family ([NC\_v3](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv3-series), [NC\_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv2-series), [ND](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#nd-series) or [ND_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ndv2-series-preview)), the specific vmSize to be used within the chosen family would also need to be whitelisted for Machine Learning Services usage.
|
||||||
|
|
||||||
|
|
||||||
|
### Getting and running the material
|
||||||
|
Clone the AzureML Notebooks repository in GitHub by running the following command on a local_directory:
|
||||||
|
|
||||||
|
* C:\local_directory>git clone https://github.com/Azure/MachineLearningNotebooks.git
|
||||||
|
|
||||||
|
On a conda prompt navigate to the local directory, activate the conda environment (<span style="font-family: Courier New;">myenv</span>), where the Azure ML SDK was installed and launch Jupyter Notebook.
|
||||||
|
|
||||||
|
* (<span style="font-family: Courier New;">myenv</span>) C:\local_directory>jupyter notebook
|
||||||
|
|
||||||
|
From the resulting browser at http://localhost:8888/tree, navigate to the master notebook:
|
||||||
|
|
||||||
|
* http://localhost:8888/tree/MachineLearningNotebooks/contrib/RAPIDS/azure-ml-with-nvidia-rapids.ipynb
|
||||||
|
|
||||||
|
|
||||||
|
The following notebook will appear:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
### Master Jupyter Notebook
|
||||||
|
The notebook can be executed interactively step by step, by pressing the Run button (In a red circle in the above image.)
|
||||||
|
|
||||||
|
The first couple of functional steps import the necessary AzureML libraries. If you experience any errors please refer back to the [setup the environment.](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#local "Local Computer Set Up") instructions.
|
||||||
|
|
||||||
|
|
||||||
|
#### Setting up a Workspace
|
||||||
|
The following step gathers the information necessary to set up a workspace to execute the RAPIDS script. This needs to be done only once, or not at all if you already have a workspace you can use set up on the Azure Portal:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
It is important to be sure to set the correct values for the subscription\_id, resource\_group, workspace\_name, and region before executing the step. An example is:
|
||||||
|
|
||||||
|
subscription_id = os.environ.get("SUBSCRIPTION_ID", "1358e503-xxxx-4043-xxxx-65b83xxxx32d")
|
||||||
|
resource_group = os.environ.get("RESOURCE_GROUP", "AML-Rapids-Testing")
|
||||||
|
workspace_name = os.environ.get("WORKSPACE_NAME", "AML_Rapids_Tester")
|
||||||
|
workspace_region = os.environ.get("WORKSPACE_REGION", "West US 2")
|
||||||
|
|
||||||
|
|
||||||
|
The resource\_group and workspace_name could take any value, the region should match the region for which the subscription has the required Machine Learning Services node quota.
|
||||||
|
|
||||||
|
The first time the code is executed it will redirect to the Azure Portal to validate subscription credentials. After the workspace is created, its related information is stored on a local file so that this step can be subsequently skipped. The immediate step will just load the saved workspace
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
Once a workspace has been created the user could skip its creation and just jump to this step. The configuration file resides in:
|
||||||
|
|
||||||
|
* C:\local_directory\\MachineLearningNotebooks\contrib\RAPIDS\aml_config\config.json
|
||||||
|
|
||||||
|
|
||||||
|
#### Creating an AML Compute Target
|
||||||
|
The following step creates an AML Compute Target.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
Parameter vm\_size on function call AmlCompute.provisioning\_configuration() has to be a member of the VM families ([NC\_v3](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv3-series), [NC\_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv2-series), [ND](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#nd-series) or [ND_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ndv2-series-preview)), which are the ones provided with P40 or V100 GPUs — the GPUs supported by RAPIDS. In this particular case a Standard\_NC24s\_V2 was used.
|
||||||
|
|
||||||
|
|
||||||
|
If the output of running the step has an error of the form:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
It is an indication that even though the subscription has a node quota for VMs for that family, it does not have a node quota for Machine Learning Services for that family.
|
||||||
|
You will need to request an increase node quota for that family in that region for **Machine Learning Services**.
|
||||||
|
|
||||||
|
|
||||||
|
Another possible error is the following:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
This indicates that the specified vmSize has not been whitelisted for usage on Machine Learning Services, and a request to do so should be filed.
|
||||||
|
|
||||||
|
The successful creation of the compute target would have an output like the following:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
#### RAPIDS script uploading and viewing
|
||||||
|
The next step copies the RAPIDS script process_data.py, which is a slightly modified implementation of the [RAPIDS E2E example](https://github.com/rapidsai/notebooks/blob/master/mortgage/E2E.ipynb), into a script processing folder and it presents its contents to the user. (The script is discussed in the next section in detail).
|
||||||
|
If the user wants to use a different RAPIDS script, the references to the <span style="font-family: Courier New;">process_data.py</span> script have to be changed
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
#### Data Uploading
|
||||||
|
The RAPIDS script loads and extracts features from the Fannie Mae’s Mortgage Dataset to train an XGBoost prediction model. The script uses two years of data
|
||||||
|
|
||||||
|
The next few steps download and decompress the data and is made available to the script as an [Azure Machine Learning Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data).
|
||||||
|
|
||||||
|
|
||||||
|
The following functions are used to download and decompress the input data
|
||||||
|
|
||||||
|
|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
The next step uses those functions to download locally file:
|
||||||
|
http://rapidsai-data.s3-website.us-east-2.amazonaws.com/notebook-mortgage-data/mortgage_2000-2001.tgz
|
||||||
|
And to decompress it, into local folder path = .\mortgage_2000-2001
|
||||||
|
The step takes several minutes, the intermediate outputs provide progress indicators.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
The decompressed data should have the following structure:
|
||||||
|
* .\mortgage_2000-2001\acq\Acquisition_<year>Q<num>.txt
|
||||||
|
* .\mortgage_2000-2001\perf\Performance_<year>Q<num>.txt
|
||||||
|
* .\mortgage_2000-2001\names.csv
|
||||||
|
|
||||||
|
The data is divided into partitions that roughly correspond to yearly quarters. RAPIDS includes support for multi-node, multi-GPU deployments, enabling scaling up and out on much larger dataset sizes. The user will be able to verify that the number of partitions that the script is able to process increases with the number of GPUs used. The RAPIDS script is implemented for single-machine scenarios. An example supporting multiple nodes will be published later.
|
||||||
|
|
||||||
|
|
||||||
|
The next step uploads the data into the [Azure Machine Learning Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data) under reference <span style="font-family: Courier New;">fileroot = mortgage_2000-2001</span>
|
||||||
|
|
||||||
|
The step takes several minutes to load the data, the output provides a progress indicator.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
Once the data has been loaded into the Azure Machine Learning Data Store, in subsequent runs the user can comment out the ds.upload line and just make reference to the <span style="font-family: Courier New;">mortgage_2000-2001</span> data store reference
|
||||||
|
|
||||||
|
|
||||||
|
#### Setting up required libraries and environment to run RAPIDS code
|
||||||
|
There are two options to set up the environment to run RAPIDS code. The following steps show how to use a prebuilt conda environment. A recommended alternative is to specify a base Docker image and package dependencies. You can find sample code for that in the notebook.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
#### Wrapper function to submit the RAPIDS script as an Azure Machine Learning experiment
|
||||||
|
|
||||||
|
The next step consists of the definition of a wrapper function to be used when the user attempts to run the RAPIDS script with different arguments. It takes as arguments: <span style="font-family: Times New Roman;">*cpu\_training*</span>; a flag that indicates if the run is meant to be processed with CPU-only, <span style="font-family: Times New Roman;">*gpu\_count*</span>; the number of GPUs to be used if they are meant to be used and part_count: the number of data partitions to be used
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
The core of the function resides in configuring the run by the instantiation of a ScriptRunConfig object, which defines the source_directory for the script to be executed, the name of the script and the arguments to be passed to the script.
|
||||||
|
In addition to the wrapper function arguments, two other arguments are passed: <span style="font-family: Times New Roman;">*data\_dir*</span>, the directory where the data is stored and <span style="font-family: Times New Roman;">*end_year*</span> is the largest year to use partition from.
|
||||||
|
|
||||||
|
|
||||||
|
As mentioned earlier the size of the data that can be processed increases with the number of gpus, in the function, dictionary <span style="font-family: Times New Roman;">*max\_gpu\_count\_data\_partition_mapping*</span> maps the maximum number of partitions that we empirically found that the system can handle given the number of GPUs used. The function throws a warning when the number of partitions for a given number of gpus exceeds the maximum but the script is still executed, however the user should expect an error as an out of memory situation would be encountered
|
||||||
|
If the user wants to use a different RAPIDS script, the reference to the process_data.py script has to be changed
|
||||||
|
|
||||||
|
|
||||||
|
#### Submitting Experiments
|
||||||
|
We are ready to submit experiments: launching the RAPIDS script with different sets of parameters.
|
||||||
|
|
||||||
|
|
||||||
|
The following couple of steps submit experiments under different conditions.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
The user can change variable num\_gpu between one and the number of GPUs supported by the chosen vmSize. Variable part\_count can take any value between 1 and 11, but if it exceeds the maximum for num_gpu, the run would result in an error
|
||||||
|
|
||||||
|
|
||||||
|
If the experiment is successfully submitted, it will be placed on a queue for processing, its status will appear as Queued, and an output like the following will appear
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
When the experiment starts running, its status will appear as Running and the output will change to something like this:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
#### Reproducing the performance gains plot results on the Blog Post
|
||||||
|
When the run has finished successfully, its status will appear as Completed and the output will change to something like this:
|
||||||
|
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
Which is the output for an experiment run with three partitions and one GPU, notice that the reported processing time is 49.16 seconds just as depicted on the performance gains plot on the blog post
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
This output corresponds to a run with three partitions and two GPUs, notice that the reported processing time is 37.50 seconds just as depicted on the performance gains plot on the blog post
|
||||||
|
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
This output corresponds to an experiment run with three partitions and three GPUs, notice that the reported processing time is 24.40 seconds just as depicted on the performance gains plot on the blog post
|
||||||
|
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
This output corresponds to an experiment run with three partitions and four GPUs, notice that the reported processing time is 23.33 seconds just as depicted on the performance gains plot on the blogpost
|
||||||
|
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
This output corresponds to an experiment run with three partitions and using only CPU, notice that the reported processing time is 9 minutes and 1.21 seconds or 541.21 second just as depicted on the performance gains plot on the blog post
|
||||||
|
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
This output corresponds to an experiment run with nine partitions and four GPUs; notice that the notebook throws a warning signaling that the number of partitions exceeds the maximum that the system can handle with that many GPUs, and the run ends up failing, hence having a status of Failed.
|
||||||
|
|
||||||
|
|
||||||
|
##### Freeing Resources
|
||||||
|
In the last step the notebook deletes the compute target. (This step is optional especially if the min_nodes in the cluster is set to 0 with which the cluster will scale down to 0 nodes when there is no usage.)
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
### RAPIDS Script
|
||||||
|
The Master Notebook runs experiments by launching a RAPIDS script with different sets of parameters. In this section, the RAPIDS script, process_data.py in the material, is analyzed
|
||||||
|
|
||||||
|
The script first imports all the necessary libraries and parses the arguments passed by the Master Notebook.
|
||||||
|
|
||||||
|
Then all the internal functions to be used by the script are defined.
|
||||||
|
|
||||||
|
|
||||||
|
#### Wrapper Auxiliary Functions:
|
||||||
|
The below functions are wrappers for a configuration module for librmm, the RAPIDS Memory Manager python interface:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
A couple of other functions are wrappers for the submission of jobs to the DASK client:
|
||||||
|
|
||||||
|

|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
#### Data Loading Functions:
|
||||||
|
The data is loaded through the use of the following three functions
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
All three functions use library function cudf.read_csv(), cuDF version for the well known counterpart on Pandas.
|
||||||
|
|
||||||
|
|
||||||
|
#### Data Transformation and Feature Extraction Functions:
|
||||||
|
The raw data is transformed and processed to extract features by joining, slicing, grouping, aggregating, factoring, etc, the original dataframes just as is done with Pandas. The following functions in the script are used for that purpose:
|
||||||
|

|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
#### Main() Function
|
||||||
|
The previous functions are used in the Main function to accomplish several steps: Set up the Dask client, do all ETL operations, set up and train an XGBoost model, the function also assigns which data needs to be processed by each Dask client
|
||||||
|
|
||||||
|
|
||||||
|
##### Setting Up DASK client:
|
||||||
|
The following lines:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
Initialize and set up a DASK client with a number of workers corresponding to the number of GPUs to be used on the run. A successful execution of the set up will result on the following output:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
##### All ETL functions are used on single calls to process\_quarter_gpu, one per data partition
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
##### Concentrating the data assigned to each DASK worker
|
||||||
|
The partitions assigned to each worker are concatenated and set up for training.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
##### Setting Training Parameters
|
||||||
|
The parameters used for the training of a gradient boosted decision tree model are set up in the following code block:
|
||||||
|

|
||||||
|
|
||||||
|
Notice how the parameters are modified when using the CPU-only mode.
|
||||||
|
|
||||||
|
|
||||||
|
##### Launching the training of a gradient boosted decision tree model using XGBoost.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
The outputs of the script can be observed in the master notebook as the script is executed
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,409 +1,559 @@
|
|||||||
{
|
{
|
||||||
"cells": [
|
"cells": [
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Licensed under the MIT License."
|
"Licensed under the MIT License."
|
||||||
]
|
]
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# NVIDIA RAPIDS in Azure Machine Learning"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"The [RAPIDS](https://www.developer.nvidia.com/rapids) suite of software libraries from NVIDIA enables the execution of end-to-end data science and analytics pipelines entirely on GPUs. In many machine learning projects, a significant portion of the model training time is spent in setting up the data; this stage of the process is known as Extraction, Transformation and Loading, or ETL. By using the DataFrame API for ETL and GPU-capable ML algorithms in RAPIDS, data preparation and training models can be done in GPU-accelerated end-to-end pipelines without incurring serialization costs between the pipeline stages. This notebook demonstrates how to use NVIDIA RAPIDS to prepare data and train model in Azure.\n",
|
|
||||||
" \n",
|
|
||||||
"In this notebook, we will do the following:\n",
|
|
||||||
" \n",
|
|
||||||
"* Create an Azure Machine Learning Workspace\n",
|
|
||||||
"* Create an AMLCompute target\n",
|
|
||||||
"* Use a script to process our data and train a model\n",
|
|
||||||
"* Obtain the data required to run this sample\n",
|
|
||||||
"* Create an AML run configuration to launch a machine learning job\n",
|
|
||||||
"* Run the script to prepare data for training and train the model\n",
|
|
||||||
" \n",
|
|
||||||
"Prerequisites:\n",
|
|
||||||
"* An Azure subscription to create a Machine Learning Workspace\n",
|
|
||||||
"* Familiarity with the Azure ML SDK (refer to [notebook samples](https://github.com/Azure/MachineLearningNotebooks))\n",
|
|
||||||
"* A Jupyter notebook environment with Azure Machine Learning SDK installed. Refer to instructions to [setup the environment](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#local)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Verify if Azure ML SDK is installed"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import azureml.core\n",
|
|
||||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import os\n",
|
|
||||||
"from azureml.core import Workspace, Experiment\n",
|
|
||||||
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
|
|
||||||
"from azureml.data.data_reference import DataReference\n",
|
|
||||||
"from azureml.core.runconfig import RunConfiguration\n",
|
|
||||||
"from azureml.core import ScriptRunConfig\n",
|
|
||||||
"from azureml.widgets import RunDetails"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Create Azure ML Workspace"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"The following step is optional if you already have a workspace. If you want to use an existing workspace, then\n",
|
|
||||||
"skip this workspace creation step and move on to the next step to load the workspace.\n",
|
|
||||||
" \n",
|
|
||||||
"<font color='red'>Important</font>: in the code cell below, be sure to set the correct values for the subscription_id, \n",
|
|
||||||
"resource_group, workspace_name, region before executing this code cell."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"subscription_id = os.environ.get(\"SUBSCRIPTION_ID\", \"<subscription_id>\")\n",
|
|
||||||
"resource_group = os.environ.get(\"RESOURCE_GROUP\", \"<resource_group>\")\n",
|
|
||||||
"workspace_name = os.environ.get(\"WORKSPACE_NAME\", \"<workspace_name>\")\n",
|
|
||||||
"workspace_region = os.environ.get(\"WORKSPACE_REGION\", \"<region>\")\n",
|
|
||||||
"\n",
|
|
||||||
"ws = Workspace.create(workspace_name, subscription_id=subscription_id, resource_group=resource_group, location=workspace_region)\n",
|
|
||||||
"\n",
|
|
||||||
"# write config to a local directory for future use\n",
|
|
||||||
"ws.write_config()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Load existing Workspace"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"ws = Workspace.from_config()\n",
|
|
||||||
"# if a locally-saved configuration file for the workspace is not available, use the following to load workspace\n",
|
|
||||||
"# ws = Workspace(subscription_id=subscription_id, resource_group=resource_group, workspace_name=workspace_name)\n",
|
|
||||||
"print('Workspace name: ' + ws.name, \n",
|
|
||||||
" 'Azure region: ' + ws.location, \n",
|
|
||||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
|
||||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')\n",
|
|
||||||
"\n",
|
|
||||||
"scripts_folder = \"scripts_folder\"\n",
|
|
||||||
"\n",
|
|
||||||
"if not os.path.isdir(scripts_folder):\n",
|
|
||||||
" os.mkdir(scripts_folder)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Create AML Compute Target"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Because NVIDIA RAPIDS requires P40 or V100 GPUs, the user needs to specify compute targets from one of [NC_v3](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv3-series), [NC_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv2-series), [ND](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#nd-series) or [ND_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ndv2-series-preview) virtual machine types in Azure; these are the families of virtual machines in Azure that are provisioned with these GPUs.\n",
|
|
||||||
" \n",
|
|
||||||
"Pick one of the supported VM SKUs based on the number of GPUs you want to use for ETL and training in RAPIDS.\n",
|
|
||||||
" \n",
|
|
||||||
"The script in this notebook is implemented for single-machine scenarios. An example supporting multiple nodes will be published later."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"gpu_cluster_name = \"gpucluster\"\n",
|
|
||||||
"\n",
|
|
||||||
"if gpu_cluster_name in ws.compute_targets:\n",
|
|
||||||
" gpu_cluster = ws.compute_targets[gpu_cluster_name]\n",
|
|
||||||
" if gpu_cluster and type(gpu_cluster) is AmlCompute:\n",
|
|
||||||
" print('found compute target. just use it. ' + gpu_cluster_name)\n",
|
|
||||||
"else:\n",
|
|
||||||
" print(\"creating new cluster\")\n",
|
|
||||||
" # vm_size parameter below could be modified to one of the RAPIDS-supported VM types\n",
|
|
||||||
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"Standard_NC6s_v2\", min_nodes=1, max_nodes = 1)\n",
|
|
||||||
"\n",
|
|
||||||
" # create the cluster\n",
|
|
||||||
" gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, provisioning_config)\n",
|
|
||||||
" gpu_cluster.wait_for_completion(show_output=True)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Script to process data and train model"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"The _process_data.py_ script used in the step below is a slightly modified implementation of [RAPIDS E2E example](https://github.com/rapidsai/notebooks/blob/master/mortgage/E2E.ipynb)."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# copy process_data.py into the script folder\n",
|
|
||||||
"import shutil\n",
|
|
||||||
"shutil.copy('./process_data.py', os.path.join(scripts_folder, 'process_data.py'))\n",
|
|
||||||
"\n",
|
|
||||||
"with open(os.path.join(scripts_folder, './process_data.py'), 'r') as process_data_script:\n",
|
|
||||||
" print(process_data_script.read())"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Data required to run this sample"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"This sample uses [Fannie Mae\u2019s Single-Family Loan Performance Data](http://www.fanniemae.com/portal/funding-the-market/data/loan-performance-data.html). Refer to the 'Available mortgage datasets' section in [instructions](https://rapidsai.github.io/demos/datasets/mortgage-data) to get sample data.\n",
|
|
||||||
"\n",
|
|
||||||
"Once you obtain access to the data, you will need to make this data available in an [Azure Machine Learning Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data), for use in this sample."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"<font color='red'>Important</font>: The following step assumes the data is uploaded to the Workspace's default data store under a folder named 'mortgagedata2000_01'. Note that uploading data to the Workspace's default data store is not necessary and the data can be referenced from any datastore, e.g., from Azure Blob or File service, once it is added as a datastore to the workspace. The path_on_datastore parameter needs to be updated, depending on where the data is available. The directory where the data is available should have the following folder structure, as the process_data.py script expects this directory structure:\n",
|
|
||||||
"* _<data directory>_/acq\n",
|
|
||||||
"* _<data directory>_/perf\n",
|
|
||||||
"* _names.csv_\n",
|
|
||||||
"\n",
|
|
||||||
"The 'acq' and 'perf' refer to directories containing data files. The _<data directory>_ is the path specified in _path_on_datastore_ parameter in the step below."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"ds = ws.get_default_datastore()\n",
|
|
||||||
"\n",
|
|
||||||
"# download and uncompress data in a local directory before uploading to data store\n",
|
|
||||||
"# directory specified in src_dir parameter below should have the acq, perf directories with data and names.csv file\n",
|
|
||||||
"# ds.upload(src_dir='<local directory that has data>', target_path='mortgagedata2000_01', overwrite=True, show_progress=True)\n",
|
|
||||||
"\n",
|
|
||||||
"# data already uploaded to the datastore\n",
|
|
||||||
"data_ref = DataReference(data_reference_name='data', datastore=ds, path_on_datastore='mortgagedata2000_01')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Create AML run configuration to launch a machine learning job"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"AML allows the option of using existing Docker images with prebuilt conda environments. The following step use an existing image from [Docker Hub](https://hub.docker.com/r/rapidsai/rapidsai/)."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"run_config = RunConfiguration()\n",
|
|
||||||
"run_config.framework = 'python'\n",
|
|
||||||
"run_config.environment.python.user_managed_dependencies = True\n",
|
|
||||||
"# use conda environment named 'rapids' available in the Docker image\n",
|
|
||||||
"# this conda environment does not include azureml-defaults package that is required for using AML functionality like metrics tracking, model management etc.\n",
|
|
||||||
"run_config.environment.python.interpreter_path = '/conda/envs/rapids/bin/python'\n",
|
|
||||||
"run_config.target = gpu_cluster_name\n",
|
|
||||||
"run_config.environment.docker.enabled = True\n",
|
|
||||||
"run_config.environment.docker.gpu_support = True\n",
|
|
||||||
"# if registry is not mentioned the image is pulled from Docker Hub\n",
|
|
||||||
"run_config.environment.docker.base_image = \"rapidsai/rapidsai:cuda9.2_ubuntu16.04_root\"\n",
|
|
||||||
"run_config.environment.spark.precache_packages = False\n",
|
|
||||||
"run_config.data_references={'data':data_ref.to_config()}"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Wrapper function to submit Azure Machine Learning experiment"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# parameter cpu_predictor indicates if training should be done on CPU. If set to true, GPUs are used *only* for ETL and *not* for training\n",
|
|
||||||
"# parameter num_gpu indicates number of GPUs to use among the GPUs available in the VM for ETL and if cpu_predictor is false, for training as well \n",
|
|
||||||
"def run_rapids_experiment(cpu_training, gpu_count):\n",
|
|
||||||
" # any value between 1-4 is allowed here depending the type of VMs available in gpu_cluster\n",
|
|
||||||
" if gpu_count not in [1, 2, 3, 4]:\n",
|
|
||||||
" raise Exception('Value specified for the number of GPUs to use {0} is invalid'.format(gpu_count))\n",
|
|
||||||
"\n",
|
|
||||||
" # following data partition mapping is empirical (specific to GPUs used and current data partitioning scheme) and may need to be tweaked\n",
|
|
||||||
" gpu_count_data_partition_mapping = {1: 2, 2: 4, 3: 5, 4: 7}\n",
|
|
||||||
" part_count = gpu_count_data_partition_mapping[gpu_count]\n",
|
|
||||||
"\n",
|
|
||||||
" end_year = 2000\n",
|
|
||||||
" if gpu_count > 2:\n",
|
|
||||||
" end_year = 2001 # use more data with more GPUs\n",
|
|
||||||
"\n",
|
|
||||||
" src = ScriptRunConfig(source_directory=scripts_folder, \n",
|
|
||||||
" script='process_data.py', \n",
|
|
||||||
" arguments = ['--num_gpu', gpu_count, '--data_dir', str(data_ref),\n",
|
|
||||||
" '--part_count', part_count, '--end_year', end_year,\n",
|
|
||||||
" '--cpu_predictor', cpu_training\n",
|
|
||||||
" ],\n",
|
|
||||||
" run_config=run_config\n",
|
|
||||||
" )\n",
|
|
||||||
"\n",
|
|
||||||
" exp = Experiment(ws, 'rapidstest')\n",
|
|
||||||
" run = exp.submit(config=src)\n",
|
|
||||||
" RunDetails(run).show()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Submit experiment (ETL & training on GPU)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"cpu_predictor = False\n",
|
|
||||||
"# the value for num_gpu should be less than or equal to the number of GPUs available in the VM\n",
|
|
||||||
"num_gpu = 1 \n",
|
|
||||||
"# use GPU for both ETL and training\n",
|
|
||||||
"run_rapids_experiment(cpu_predictor, num_gpu)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Submit experiment (ETL on GPU, training on CPU)\n",
|
|
||||||
"\n",
|
|
||||||
"To observe the performance difference between GPU-accelerated RAPIDS-based training and CPU-only training, set 'cpu_predictor' to 'True' and rerun the experiment"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"cpu_predictor = True\n",
|
|
||||||
"# the value for num_gpu should be less than or equal to the number of GPUs available in the VM\n",
|
|
||||||
"num_gpu = 1\n",
|
|
||||||
"# train using CPU, use GPU for ETL\n",
|
|
||||||
"run_rapids_experiment(cpu_predictor, num_gpu)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Delete cluster"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# delete the cluster\n",
|
|
||||||
"# gpu_cluster.delete()"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"authors": [
|
|
||||||
{
|
|
||||||
"name": "ksivas"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3.6",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python36"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.6.6"
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
{
|
||||||
"nbformat_minor": 2
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# NVIDIA RAPIDS in Azure Machine Learning"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The [RAPIDS](https://www.developer.nvidia.com/rapids) suite of software libraries from NVIDIA enables the execution of end-to-end data science and analytics pipelines entirely on GPUs. In many machine learning projects, a significant portion of the model training time is spent in setting up the data; this stage of the process is known as Extraction, Transformation and Loading, or ETL. By using the DataFrame API for ETL and GPU-capable ML algorithms in RAPIDS, data preparation and training models can be done in GPU-accelerated end-to-end pipelines without incurring serialization costs between the pipeline stages. This notebook demonstrates how to use NVIDIA RAPIDS to prepare data and train model in Azure.\n",
|
||||||
|
" \n",
|
||||||
|
"In this notebook, we will do the following:\n",
|
||||||
|
" \n",
|
||||||
|
"* Create an Azure Machine Learning Workspace\n",
|
||||||
|
"* Create an AMLCompute target\n",
|
||||||
|
"* Use a script to process our data and train a model\n",
|
||||||
|
"* Obtain the data required to run this sample\n",
|
||||||
|
"* Create an AML run configuration to launch a machine learning job\n",
|
||||||
|
"* Run the script to prepare data for training and train the model\n",
|
||||||
|
" \n",
|
||||||
|
"Prerequisites:\n",
|
||||||
|
"* An Azure subscription to create a Machine Learning Workspace\n",
|
||||||
|
"* Familiarity with the Azure ML SDK (refer to [notebook samples](https://github.com/Azure/MachineLearningNotebooks))\n",
|
||||||
|
"* A Jupyter notebook environment with Azure Machine Learning SDK installed. Refer to instructions to [setup the environment](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#local)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Verify if Azure ML SDK is installed"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import azureml.core\n",
|
||||||
|
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os\n",
|
||||||
|
"from azureml.core import Workspace, Experiment\n",
|
||||||
|
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||||
|
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
|
||||||
|
"from azureml.data.data_reference import DataReference\n",
|
||||||
|
"from azureml.core.runconfig import RunConfiguration\n",
|
||||||
|
"from azureml.core import ScriptRunConfig\n",
|
||||||
|
"from azureml.widgets import RunDetails"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Create Azure ML Workspace"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The following step is optional if you already have a workspace. If you want to use an existing workspace, then\n",
|
||||||
|
"skip this workspace creation step and move on to the next step to load the workspace.\n",
|
||||||
|
" \n",
|
||||||
|
"<font color='red'>Important</font>: in the code cell below, be sure to set the correct values for the subscription_id, \n",
|
||||||
|
"resource_group, workspace_name, region before executing this code cell."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"subscription_id = os.environ.get(\"SUBSCRIPTION_ID\", \"<subscription_id>\")\n",
|
||||||
|
"resource_group = os.environ.get(\"RESOURCE_GROUP\", \"<resource_group>\")\n",
|
||||||
|
"workspace_name = os.environ.get(\"WORKSPACE_NAME\", \"<workspace_name>\")\n",
|
||||||
|
"workspace_region = os.environ.get(\"WORKSPACE_REGION\", \"<region>\")\n",
|
||||||
|
"\n",
|
||||||
|
"ws = Workspace.create(workspace_name, subscription_id=subscription_id, resource_group=resource_group, location=workspace_region)\n",
|
||||||
|
"\n",
|
||||||
|
"# write config to a local directory for future use\n",
|
||||||
|
"ws.write_config()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Load existing Workspace"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"# if a locally-saved configuration file for the workspace is not available, use the following to load workspace\n",
|
||||||
|
"# ws = Workspace(subscription_id=subscription_id, resource_group=resource_group, workspace_name=workspace_name)\n",
|
||||||
|
"print('Workspace name: ' + ws.name, \n",
|
||||||
|
" 'Azure region: ' + ws.location, \n",
|
||||||
|
" 'Subscription id: ' + ws.subscription_id, \n",
|
||||||
|
" 'Resource group: ' + ws.resource_group, sep = '\\n')\n",
|
||||||
|
"\n",
|
||||||
|
"scripts_folder = \"scripts_folder\"\n",
|
||||||
|
"\n",
|
||||||
|
"if not os.path.isdir(scripts_folder):\n",
|
||||||
|
" os.mkdir(scripts_folder)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Create AML Compute Target"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Because NVIDIA RAPIDS requires P40 or V100 GPUs, the user needs to specify compute targets from one of [NC_v3](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv3-series), [NC_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv2-series), [ND](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#nd-series) or [ND_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ndv2-series-preview) virtual machine types in Azure; these are the families of virtual machines in Azure that are provisioned with these GPUs.\n",
|
||||||
|
" \n",
|
||||||
|
"Pick one of the supported VM SKUs based on the number of GPUs you want to use for ETL and training in RAPIDS.\n",
|
||||||
|
" \n",
|
||||||
|
"The script in this notebook is implemented for single-machine scenarios. An example supporting multiple nodes will be published later."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"gpu_cluster_name = \"gpucluster\"\n",
|
||||||
|
"\n",
|
||||||
|
"if gpu_cluster_name in ws.compute_targets:\n",
|
||||||
|
" gpu_cluster = ws.compute_targets[gpu_cluster_name]\n",
|
||||||
|
" if gpu_cluster and type(gpu_cluster) is AmlCompute:\n",
|
||||||
|
" print('found compute target. just use it. ' + gpu_cluster_name)\n",
|
||||||
|
"else:\n",
|
||||||
|
" print(\"creating new cluster\")\n",
|
||||||
|
" # vm_size parameter below could be modified to one of the RAPIDS-supported VM types\n",
|
||||||
|
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"Standard_NC6s_v2\", min_nodes=1, max_nodes = 1)\n",
|
||||||
|
"\n",
|
||||||
|
" # create the cluster\n",
|
||||||
|
" gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, provisioning_config)\n",
|
||||||
|
" gpu_cluster.wait_for_completion(show_output=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Script to process data and train model"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The _process_data.py_ script used in the step below is a slightly modified implementation of [RAPIDS E2E example](https://github.com/rapidsai/notebooks/blob/master/mortgage/E2E.ipynb)."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# copy process_data.py into the script folder\n",
|
||||||
|
"import shutil\n",
|
||||||
|
"shutil.copy('./process_data.py', os.path.join(scripts_folder, 'process_data.py'))\n",
|
||||||
|
"\n",
|
||||||
|
"with open(os.path.join(scripts_folder, './process_data.py'), 'r') as process_data_script:\n",
|
||||||
|
" print(process_data_script.read())"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Data required to run this sample"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"This sample uses [Fannie Mae's Single-Family Loan Performance Data](http://www.fanniemae.com/portal/funding-the-market/data/loan-performance-data.html). Once you obtain access to the data, you will need to make this data available in an [Azure Machine Learning Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data), for use in this sample. The following code shows how to do that."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Downloading Data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<font color='red'>Important</font>: Python package progressbar2 is necessary to run the following cell. If it is not available in your environment where this notebook is running, please install it."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import tarfile\n",
|
||||||
|
"import hashlib\n",
|
||||||
|
"from urllib.request import urlretrieve\n",
|
||||||
|
"from progressbar import ProgressBar\n",
|
||||||
|
"\n",
|
||||||
|
"def validate_downloaded_data(path):\n",
|
||||||
|
" if(os.path.isdir(path) and os.path.exists(path + '//names.csv')) :\n",
|
||||||
|
" if(os.path.isdir(path + '//acq' ) and len(os.listdir(path + '//acq')) == 8):\n",
|
||||||
|
" if(os.path.isdir(path + '//perf' ) and len(os.listdir(path + '//perf')) == 11):\n",
|
||||||
|
" print(\"Data has been downloaded and decompressed at: {0}\".format(path))\n",
|
||||||
|
" return True\n",
|
||||||
|
" print(\"Data has not been downloaded and decompressed\")\n",
|
||||||
|
" return False\n",
|
||||||
|
"\n",
|
||||||
|
"def show_progress(count, block_size, total_size):\n",
|
||||||
|
" global pbar\n",
|
||||||
|
" global processed\n",
|
||||||
|
" \n",
|
||||||
|
" if count == 0:\n",
|
||||||
|
" pbar = ProgressBar(maxval=total_size)\n",
|
||||||
|
" processed = 0\n",
|
||||||
|
" \n",
|
||||||
|
" processed += block_size\n",
|
||||||
|
" processed = min(processed,total_size)\n",
|
||||||
|
" pbar.update(processed)\n",
|
||||||
|
"\n",
|
||||||
|
" \n",
|
||||||
|
"def download_file(fileroot):\n",
|
||||||
|
" filename = fileroot + '.tgz'\n",
|
||||||
|
" if(not os.path.exists(filename) or hashlib.md5(open(filename, 'rb').read()).hexdigest() != '82dd47135053303e9526c2d5c43befd5' ):\n",
|
||||||
|
" url_format = 'http://rapidsai-data.s3-website.us-east-2.amazonaws.com/notebook-mortgage-data/{0}.tgz'\n",
|
||||||
|
" url = url_format.format(fileroot)\n",
|
||||||
|
" print(\"...Downloading file :{0}\".format(filename))\n",
|
||||||
|
" urlretrieve(url, filename,show_progress)\n",
|
||||||
|
" pbar.finish()\n",
|
||||||
|
" print(\"...File :{0} finished downloading\".format(filename))\n",
|
||||||
|
" else:\n",
|
||||||
|
" print(\"...File :{0} has been downloaded already\".format(filename))\n",
|
||||||
|
" return filename\n",
|
||||||
|
"\n",
|
||||||
|
"def decompress_file(filename,path):\n",
|
||||||
|
" tar = tarfile.open(filename)\n",
|
||||||
|
" print(\"...Getting information from {0} about files to decompress\".format(filename))\n",
|
||||||
|
" members = tar.getmembers()\n",
|
||||||
|
" numFiles = len(members)\n",
|
||||||
|
" so_far = 0\n",
|
||||||
|
" for member_info in members:\n",
|
||||||
|
" tar.extract(member_info,path=path)\n",
|
||||||
|
" show_progress(so_far, 1, numFiles)\n",
|
||||||
|
" so_far += 1\n",
|
||||||
|
" pbar.finish()\n",
|
||||||
|
" print(\"...All {0} files have been decompressed\".format(numFiles))\n",
|
||||||
|
" tar.close()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"fileroot = 'mortgage_2000-2001'\n",
|
||||||
|
"path = '.\\\\{0}'.format(fileroot)\n",
|
||||||
|
"pbar = None\n",
|
||||||
|
"processed = 0\n",
|
||||||
|
"\n",
|
||||||
|
"if(not validate_downloaded_data(path)):\n",
|
||||||
|
" print(\"Downloading and Decompressing Input Data\")\n",
|
||||||
|
" filename = download_file(fileroot)\n",
|
||||||
|
" decompress_file(filename,path)\n",
|
||||||
|
" print(\"Input Data has been Downloaded and Decompressed\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Uploading Data to Workspace"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ds = ws.get_default_datastore()\n",
|
||||||
|
"\n",
|
||||||
|
"# download and uncompress data in a local directory before uploading to data store\n",
|
||||||
|
"# directory specified in src_dir parameter below should have the acq, perf directories with data and names.csv file\n",
|
||||||
|
"ds.upload(src_dir=path, target_path=fileroot, overwrite=True, show_progress=True)\n",
|
||||||
|
"\n",
|
||||||
|
"# data already uploaded to the datastore\n",
|
||||||
|
"data_ref = DataReference(data_reference_name='data', datastore=ds, path_on_datastore=fileroot)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Create AML run configuration to launch a machine learning job"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"RunConfiguration is used to submit jobs to Azure Machine Learning service. When creating RunConfiguration for a job, users can either \n",
|
||||||
|
"1. specify a Docker image with prebuilt conda environment and use it without any modifications to run the job, or \n",
|
||||||
|
"2. specify a Docker image as the base image and conda or pip packages as dependnecies to let AML build a new Docker image with a conda environment containing specified dependencies to use in the job\n",
|
||||||
|
"\n",
|
||||||
|
"The second option is the recommended option in AML. \n",
|
||||||
|
"The following steps have code for both options. You can pick the one that is more appropriate for your requirements. "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Specify prebuilt conda environment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The following code shows how to use an existing image from [Docker Hub](https://hub.docker.com/r/rapidsai/rapidsai/) that has a prebuilt conda environment named 'rapids' when creating a RunConfiguration. Note that this conda environment does not include azureml-defaults package that is required for using AML functionality like metrics tracking, model management etc. This package is automatically installed when you use 'Specify package dependencies' option and that is why it is the recommended option to create RunConfiguraiton in AML."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"run_config = RunConfiguration()\n",
|
||||||
|
"run_config.framework = 'python'\n",
|
||||||
|
"run_config.environment.python.user_managed_dependencies = True\n",
|
||||||
|
"run_config.environment.python.interpreter_path = '/conda/envs/rapids/bin/python'\n",
|
||||||
|
"run_config.target = gpu_cluster_name\n",
|
||||||
|
"run_config.environment.docker.enabled = True\n",
|
||||||
|
"run_config.environment.docker.gpu_support = True\n",
|
||||||
|
"run_config.environment.docker.base_image = \"rapidsai/rapidsai:cuda9.2-runtime-ubuntu18.04\"\n",
|
||||||
|
"# run_config.environment.docker.base_image_registry.address = '<registry_url>' # not required if the base_image is in Docker hub\n",
|
||||||
|
"# run_config.environment.docker.base_image_registry.username = '<user_name>' # needed only for private images\n",
|
||||||
|
"# run_config.environment.docker.base_image_registry.password = '<password>' # needed only for private images\n",
|
||||||
|
"run_config.environment.spark.precache_packages = False\n",
|
||||||
|
"run_config.data_references={'data':data_ref.to_config()}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Specify package dependencies"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The following code shows how to list package dependencies in a conda environment definition file (rapids.yml) when creating a RunConfiguration"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# cd = CondaDependencies(conda_dependencies_file_path='rapids.yml')\n",
|
||||||
|
"# run_config = RunConfiguration(conda_dependencies=cd)\n",
|
||||||
|
"# run_config.framework = 'python'\n",
|
||||||
|
"# run_config.target = gpu_cluster_name\n",
|
||||||
|
"# run_config.environment.docker.enabled = True\n",
|
||||||
|
"# run_config.environment.docker.gpu_support = True\n",
|
||||||
|
"# run_config.environment.docker.base_image = \"<image>\"\n",
|
||||||
|
"# run_config.environment.docker.base_image_registry.address = '<registry_url>' # not required if the base_image is in Docker hub\n",
|
||||||
|
"# run_config.environment.docker.base_image_registry.username = '<user_name>' # needed only for private images\n",
|
||||||
|
"# run_config.environment.docker.base_image_registry.password = '<password>' # needed only for private images\n",
|
||||||
|
"# run_config.environment.spark.precache_packages = False\n",
|
||||||
|
"# run_config.data_references={'data':data_ref.to_config()}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Wrapper function to submit Azure Machine Learning experiment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# parameter cpu_predictor indicates if training should be done on CPU. If set to true, GPUs are used *only* for ETL and *not* for training\n",
|
||||||
|
"# parameter num_gpu indicates number of GPUs to use among the GPUs available in the VM for ETL and if cpu_predictor is false, for training as well \n",
|
||||||
|
"def run_rapids_experiment(cpu_training, gpu_count, part_count):\n",
|
||||||
|
" # any value between 1-4 is allowed here depending the type of VMs available in gpu_cluster\n",
|
||||||
|
" if gpu_count not in [1, 2, 3, 4]:\n",
|
||||||
|
" raise Exception('Value specified for the number of GPUs to use {0} is invalid'.format(gpu_count))\n",
|
||||||
|
"\n",
|
||||||
|
" # following data partition mapping is empirical (specific to GPUs used and current data partitioning scheme) and may need to be tweaked\n",
|
||||||
|
" max_gpu_count_data_partition_mapping = {1: 3, 2: 4, 3: 6, 4: 8}\n",
|
||||||
|
" \n",
|
||||||
|
" if part_count > max_gpu_count_data_partition_mapping[gpu_count]:\n",
|
||||||
|
" print(\"Too many partitions for the number of GPUs, exceeding memory threshold\")\n",
|
||||||
|
" \n",
|
||||||
|
" if part_count > 11:\n",
|
||||||
|
" print(\"Warning: Maximum number of partitions available is 11\")\n",
|
||||||
|
" part_count = 11\n",
|
||||||
|
" \n",
|
||||||
|
" end_year = 2000\n",
|
||||||
|
" \n",
|
||||||
|
" if part_count > 4:\n",
|
||||||
|
" end_year = 2001 # use more data with more GPUs\n",
|
||||||
|
"\n",
|
||||||
|
" src = ScriptRunConfig(source_directory=scripts_folder, \n",
|
||||||
|
" script='process_data.py', \n",
|
||||||
|
" arguments = ['--num_gpu', gpu_count, '--data_dir', str(data_ref),\n",
|
||||||
|
" '--part_count', part_count, '--end_year', end_year,\n",
|
||||||
|
" '--cpu_predictor', cpu_training\n",
|
||||||
|
" ],\n",
|
||||||
|
" run_config=run_config\n",
|
||||||
|
" )\n",
|
||||||
|
"\n",
|
||||||
|
" exp = Experiment(ws, 'rapidstest')\n",
|
||||||
|
" run = exp.submit(config=src)\n",
|
||||||
|
" RunDetails(run).show()\n",
|
||||||
|
" return run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Submit experiment (ETL & training on GPU)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"cpu_predictor = False\n",
|
||||||
|
"# the value for num_gpu should be less than or equal to the number of GPUs available in the VM\n",
|
||||||
|
"num_gpu = 1\n",
|
||||||
|
"data_part_count = 1\n",
|
||||||
|
"# train using CPU, use GPU for both ETL and training\n",
|
||||||
|
"run = run_rapids_experiment(cpu_predictor, num_gpu, data_part_count)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Submit experiment (ETL on GPU, training on CPU)\n",
|
||||||
|
"\n",
|
||||||
|
"To observe performance difference between GPU-accelerated RAPIDS based training with CPU-only training, set 'cpu_predictor' predictor to 'True' and rerun the experiment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"cpu_predictor = True\n",
|
||||||
|
"# the value for num_gpu should be less than or equal to the number of GPUs available in the VM\n",
|
||||||
|
"num_gpu = 1\n",
|
||||||
|
"data_part_count = 1\n",
|
||||||
|
"# train using CPU, use GPU for ETL\n",
|
||||||
|
"run = run_rapids_experiment(cpu_predictor, num_gpu, data_part_count)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Delete cluster"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# delete the cluster\n",
|
||||||
|
"# gpu_cluster.delete()"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "ksivas"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.6",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python36"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.6.6"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
}
|
}
|
||||||
|
|||||||
BIN
contrib/RAPIDS/imgs/2GPUs.png
Normal file
|
After Width: | Height: | Size: 180 KiB |
BIN
contrib/RAPIDS/imgs/3GPUs.png
Normal file
|
After Width: | Height: | Size: 183 KiB |
BIN
contrib/RAPIDS/imgs/4gpus.png
Normal file
|
After Width: | Height: | Size: 183 KiB |
BIN
contrib/RAPIDS/imgs/CPUBase.png
Normal file
|
After Width: | Height: | Size: 177 KiB |
BIN
contrib/RAPIDS/imgs/DLF1.png
Normal file
|
After Width: | Height: | Size: 5.0 KiB |
BIN
contrib/RAPIDS/imgs/DLF2.png
Normal file
|
After Width: | Height: | Size: 4.8 KiB |
BIN
contrib/RAPIDS/imgs/DLF3.png
Normal file
|
After Width: | Height: | Size: 3.2 KiB |
BIN
contrib/RAPIDS/imgs/Dask2.png
Normal file
|
After Width: | Height: | Size: 70 KiB |
BIN
contrib/RAPIDS/imgs/ETL.png
Normal file
|
After Width: | Height: | Size: 64 KiB |
BIN
contrib/RAPIDS/imgs/NotebookHome.png
Normal file
|
After Width: | Height: | Size: 554 KiB |
BIN
contrib/RAPIDS/imgs/OOM.png
Normal file
|
After Width: | Height: | Size: 213 KiB |
BIN
contrib/RAPIDS/imgs/PArameters.png
Normal file
|
After Width: | Height: | Size: 58 KiB |
BIN
contrib/RAPIDS/imgs/WorkSpaceSetUp.png
Normal file
|
After Width: | Height: | Size: 34 KiB |
BIN
contrib/RAPIDS/imgs/clusterdelete.png
Normal file
|
After Width: | Height: | Size: 4.5 KiB |
BIN
contrib/RAPIDS/imgs/completed.png
Normal file
|
After Width: | Height: | Size: 187 KiB |
BIN
contrib/RAPIDS/imgs/daskini.png
Normal file
|
After Width: | Height: | Size: 22 KiB |
BIN
contrib/RAPIDS/imgs/daskoutput.png
Normal file
|
After Width: | Height: | Size: 9.7 KiB |
BIN
contrib/RAPIDS/imgs/datastore.png
Normal file
|
After Width: | Height: | Size: 163 KiB |
BIN
contrib/RAPIDS/imgs/dcf1.png
Normal file
|
After Width: | Height: | Size: 3.5 KiB |
BIN
contrib/RAPIDS/imgs/dcf2.png
Normal file
|
After Width: | Height: | Size: 2.9 KiB |
BIN
contrib/RAPIDS/imgs/dcf3.png
Normal file
|
After Width: | Height: | Size: 2.5 KiB |
BIN
contrib/RAPIDS/imgs/dcf4.png
Normal file
|
After Width: | Height: | Size: 3.0 KiB |
BIN
contrib/RAPIDS/imgs/downamddecom.png
Normal file
|
After Width: | Height: | Size: 60 KiB |
BIN
contrib/RAPIDS/imgs/fef1.png
Normal file
|
After Width: | Height: | Size: 3.5 KiB |
BIN
contrib/RAPIDS/imgs/fef2.png
Normal file
|
After Width: | Height: | Size: 3.9 KiB |
BIN
contrib/RAPIDS/imgs/fef3.png
Normal file
|
After Width: | Height: | Size: 5.0 KiB |
BIN
contrib/RAPIDS/imgs/fef4.png
Normal file
|
After Width: | Height: | Size: 4.0 KiB |
BIN
contrib/RAPIDS/imgs/fef5.png
Normal file
|
After Width: | Height: | Size: 4.1 KiB |
BIN
contrib/RAPIDS/imgs/fef6.png
Normal file
|
After Width: | Height: | Size: 4.5 KiB |
BIN
contrib/RAPIDS/imgs/fef7.png
Normal file
|
After Width: | Height: | Size: 5.1 KiB |
BIN
contrib/RAPIDS/imgs/fef8.png
Normal file
|
After Width: | Height: | Size: 3.9 KiB |
BIN
contrib/RAPIDS/imgs/fef9.png
Normal file
|
After Width: | Height: | Size: 3.6 KiB |
BIN
contrib/RAPIDS/imgs/install2.png
Normal file
|
After Width: | Height: | Size: 120 KiB |
BIN
contrib/RAPIDS/imgs/installation.png
Normal file
|
After Width: | Height: | Size: 55 KiB |
BIN
contrib/RAPIDS/imgs/queue.png
Normal file
|
After Width: | Height: | Size: 52 KiB |
BIN
contrib/RAPIDS/imgs/running.png
Normal file
|
After Width: | Height: | Size: 181 KiB |
BIN
contrib/RAPIDS/imgs/saved_workspace.png
Normal file
|
After Width: | Height: | Size: 36 KiB |
BIN
contrib/RAPIDS/imgs/scriptuploading.png
Normal file
|
After Width: | Height: | Size: 21 KiB |
BIN
contrib/RAPIDS/imgs/submission1.png
Normal file
|
After Width: | Height: | Size: 19 KiB |
BIN
contrib/RAPIDS/imgs/target_creation.png
Normal file
|
After Width: | Height: | Size: 45 KiB |
BIN
contrib/RAPIDS/imgs/targeterror1.png
Normal file
|
After Width: | Height: | Size: 31 KiB |
BIN
contrib/RAPIDS/imgs/targeterror2.png
Normal file
|
After Width: | Height: | Size: 29 KiB |
BIN
contrib/RAPIDS/imgs/targetsuccess.png
Normal file
|
After Width: | Height: | Size: 10 KiB |
BIN
contrib/RAPIDS/imgs/training.png
Normal file
|
After Width: | Height: | Size: 18 KiB |
BIN
contrib/RAPIDS/imgs/wap1.png
Normal file
|
After Width: | Height: | Size: 2.4 KiB |
BIN
contrib/RAPIDS/imgs/wap2.png
Normal file
|
After Width: | Height: | Size: 2.5 KiB |
BIN
contrib/RAPIDS/imgs/wap3.png
Normal file
|
After Width: | Height: | Size: 3.4 KiB |
BIN
contrib/RAPIDS/imgs/wap4.png
Normal file
|
After Width: | Height: | Size: 4.8 KiB |
BIN
contrib/RAPIDS/imgs/wrapper.png
Normal file
|
After Width: | Height: | Size: 99 KiB |
@@ -1,9 +1,9 @@
|
|||||||
# License Info: https://github.com/rapidsai/notebooks/blob/master/LICENSE
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import datetime
|
import datetime
|
||||||
import dask_xgboost as dxgb_gpu
|
import dask_xgboost as dxgb_gpu
|
||||||
import dask
|
import dask
|
||||||
import dask_cudf
|
import dask_cudf
|
||||||
|
from dask_cuda import LocalCUDACluster
|
||||||
from dask.delayed import delayed
|
from dask.delayed import delayed
|
||||||
from dask.distributed import Client, wait
|
from dask.distributed import Client, wait
|
||||||
import xgboost as xgb
|
import xgboost as xgb
|
||||||
@@ -15,53 +15,6 @@ from glob import glob
|
|||||||
import os
|
import os
|
||||||
import argparse
|
import argparse
|
||||||
|
|
||||||
parser = argparse.ArgumentParser("rapidssample")
|
|
||||||
parser.add_argument("--data_dir", type=str, help="location of data")
|
|
||||||
parser.add_argument("--num_gpu", type=int, help="Number of GPUs to use", default=1)
|
|
||||||
parser.add_argument("--part_count", type=int, help="Number of data files to train against", default=2)
|
|
||||||
parser.add_argument("--end_year", type=int, help="Year to end the data load", default=2000)
|
|
||||||
parser.add_argument("--cpu_predictor", type=str, help="Flag to use CPU for prediction", default='False')
|
|
||||||
parser.add_argument('-f', type=str, default='') # added for notebook execution scenarios
|
|
||||||
args = parser.parse_args()
|
|
||||||
data_dir = args.data_dir
|
|
||||||
num_gpu = args.num_gpu
|
|
||||||
part_count = args.part_count
|
|
||||||
end_year = args.end_year
|
|
||||||
cpu_predictor = args.cpu_predictor.lower() in ('yes', 'true', 't', 'y', '1')
|
|
||||||
|
|
||||||
print('data_dir = {0}'.format(data_dir))
|
|
||||||
print('num_gpu = {0}'.format(num_gpu))
|
|
||||||
print('part_count = {0}'.format(part_count))
|
|
||||||
part_count = part_count + 1 # adding one because the usage below is not inclusive
|
|
||||||
print('end_year = {0}'.format(end_year))
|
|
||||||
print('cpu_predictor = {0}'.format(cpu_predictor))
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
cmd = "hostname --all-ip-addresses"
|
|
||||||
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
|
|
||||||
output, error = process.communicate()
|
|
||||||
IPADDR = str(output.decode()).split()[0]
|
|
||||||
print('IPADDR is {0}'.format(IPADDR))
|
|
||||||
|
|
||||||
cmd = "/rapids/notebooks/utils/dask-setup.sh 0"
|
|
||||||
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
|
|
||||||
output, error = process.communicate()
|
|
||||||
|
|
||||||
cmd = "/rapids/notebooks/utils/dask-setup.sh rapids " + str(num_gpu) + " 8786 8787 8790 " + str(IPADDR) + " MASTER"
|
|
||||||
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
|
|
||||||
output, error = process.communicate()
|
|
||||||
|
|
||||||
print(output.decode())
|
|
||||||
|
|
||||||
import dask
|
|
||||||
from dask.delayed import delayed
|
|
||||||
from dask.distributed import Client, wait
|
|
||||||
|
|
||||||
_client = IPADDR + str(":8786")
|
|
||||||
|
|
||||||
client = dask.distributed.Client(_client)
|
|
||||||
|
|
||||||
def initialize_rmm_pool():
|
def initialize_rmm_pool():
|
||||||
from librmm_cffi import librmm_config as rmm_cfg
|
from librmm_cffi import librmm_config as rmm_cfg
|
||||||
|
|
||||||
@@ -81,15 +34,17 @@ def run_dask_task(func, **kwargs):
|
|||||||
task = func(**kwargs)
|
task = func(**kwargs)
|
||||||
return task
|
return task
|
||||||
|
|
||||||
def process_quarter_gpu(year=2000, quarter=1, perf_file=""):
|
def process_quarter_gpu(client, col_names_path, acq_data_path, year=2000, quarter=1, perf_file=""):
|
||||||
|
dask_client = client
|
||||||
ml_arrays = run_dask_task(delayed(run_gpu_workflow),
|
ml_arrays = run_dask_task(delayed(run_gpu_workflow),
|
||||||
|
col_path=col_names_path,
|
||||||
|
acq_path=acq_data_path,
|
||||||
quarter=quarter,
|
quarter=quarter,
|
||||||
year=year,
|
year=year,
|
||||||
perf_file=perf_file)
|
perf_file=perf_file)
|
||||||
return client.compute(ml_arrays,
|
return dask_client.compute(ml_arrays,
|
||||||
optimize_graph=False,
|
optimize_graph=False,
|
||||||
fifo_timeout="0ms"
|
fifo_timeout="0ms")
|
||||||
)
|
|
||||||
|
|
||||||
def null_workaround(df, **kwargs):
|
def null_workaround(df, **kwargs):
|
||||||
for column, data_type in df.dtypes.items():
|
for column, data_type in df.dtypes.items():
|
||||||
@@ -99,9 +54,9 @@ def null_workaround(df, **kwargs):
|
|||||||
df[column] = df[column].fillna(-1)
|
df[column] = df[column].fillna(-1)
|
||||||
return df
|
return df
|
||||||
|
|
||||||
def run_gpu_workflow(quarter=1, year=2000, perf_file="", **kwargs):
|
def run_gpu_workflow(col_path, acq_path, quarter=1, year=2000, perf_file="", **kwargs):
|
||||||
names = gpu_load_names()
|
names = gpu_load_names(col_path=col_path)
|
||||||
acq_gdf = gpu_load_acquisition_csv(acquisition_path= acq_data_path + "/Acquisition_"
|
acq_gdf = gpu_load_acquisition_csv(acquisition_path= acq_path + "/Acquisition_"
|
||||||
+ str(year) + "Q" + str(quarter) + ".txt")
|
+ str(year) + "Q" + str(quarter) + ".txt")
|
||||||
acq_gdf = acq_gdf.merge(names, how='left', on=['seller_name'])
|
acq_gdf = acq_gdf.merge(names, how='left', on=['seller_name'])
|
||||||
acq_gdf.drop_column('seller_name')
|
acq_gdf.drop_column('seller_name')
|
||||||
@@ -231,7 +186,7 @@ def gpu_load_acquisition_csv(acquisition_path, **kwargs):
|
|||||||
|
|
||||||
return cudf.read_csv(acquisition_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)
|
return cudf.read_csv(acquisition_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)
|
||||||
|
|
||||||
def gpu_load_names(**kwargs):
|
def gpu_load_names(col_path):
|
||||||
""" Loads names used for renaming the banks
|
""" Loads names used for renaming the banks
|
||||||
|
|
||||||
Returns
|
Returns
|
||||||
@@ -248,7 +203,7 @@ def gpu_load_names(**kwargs):
|
|||||||
("new", "category"),
|
("new", "category"),
|
||||||
])
|
])
|
||||||
|
|
||||||
return cudf.read_csv(col_names_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)
|
return cudf.read_csv(col_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)
|
||||||
|
|
||||||
def create_ever_features(gdf, **kwargs):
|
def create_ever_features(gdf, **kwargs):
|
||||||
everdf = gdf[['loan_id', 'current_loan_delinquency_status']]
|
everdf = gdf[['loan_id', 'current_loan_delinquency_status']]
|
||||||
@@ -384,117 +339,157 @@ def last_mile_cleaning(df, **kwargs):
|
|||||||
df['delinquency_12'] = df['delinquency_12'].fillna(False).astype('int32')
|
df['delinquency_12'] = df['delinquency_12'].fillna(False).astype('int32')
|
||||||
for column in df.columns:
|
for column in df.columns:
|
||||||
df[column] = df[column].fillna(-1)
|
df[column] = df[column].fillna(-1)
|
||||||
return df.to_arrow(index=False)
|
return df.to_arrow(preserve_index=False)
|
||||||
|
|
||||||
|
def main():
|
||||||
|
#print('XGBOOST_BUILD_DOC is ' + os.environ['XGBOOST_BUILD_DOC'])
|
||||||
|
parser = argparse.ArgumentParser("rapidssample")
|
||||||
|
parser.add_argument("--data_dir", type=str, help="location of data")
|
||||||
|
parser.add_argument("--num_gpu", type=int, help="Number of GPUs to use", default=1)
|
||||||
|
parser.add_argument("--part_count", type=int, help="Number of data files to train against", default=2)
|
||||||
|
parser.add_argument("--end_year", type=int, help="Year to end the data load", default=2000)
|
||||||
|
parser.add_argument("--cpu_predictor", type=str, help="Flag to use CPU for prediction", default='False')
|
||||||
|
parser.add_argument('-f', type=str, default='') # added for notebook execution scenarios
|
||||||
|
args = parser.parse_args()
|
||||||
|
data_dir = args.data_dir
|
||||||
|
num_gpu = args.num_gpu
|
||||||
|
part_count = args.part_count
|
||||||
|
end_year = args.end_year
|
||||||
|
cpu_predictor = args.cpu_predictor.lower() in ('yes', 'true', 't', 'y', '1')
|
||||||
|
|
||||||
|
if cpu_predictor:
|
||||||
|
print('Training with CPUs require num gpu = 1')
|
||||||
|
num_gpu = 1
|
||||||
|
|
||||||
|
print('data_dir = {0}'.format(data_dir))
|
||||||
|
print('num_gpu = {0}'.format(num_gpu))
|
||||||
|
print('part_count = {0}'.format(part_count))
|
||||||
|
#part_count = part_count + 1 # adding one because the usage below is not inclusive
|
||||||
|
print('end_year = {0}'.format(end_year))
|
||||||
|
print('cpu_predictor = {0}'.format(cpu_predictor))
|
||||||
|
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
cmd = "hostname --all-ip-addresses"
|
||||||
|
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
|
||||||
|
output, error = process.communicate()
|
||||||
|
IPADDR = str(output.decode()).split()[0]
|
||||||
|
|
||||||
|
cluster = LocalCUDACluster(ip=IPADDR,n_workers=num_gpu)
|
||||||
|
client = Client(cluster)
|
||||||
|
client
|
||||||
|
print(client.ncores())
|
||||||
|
|
||||||
# to download data for this notebook, visit https://rapidsai.github.io/demos/datasets/mortgage-data and update the following paths accordingly
|
# to download data for this notebook, visit https://rapidsai.github.io/demos/datasets/mortgage-data and update the following paths accordingly
|
||||||
acq_data_path = "{0}/acq".format(data_dir) #"/rapids/data/mortgage/acq"
|
acq_data_path = "{0}/acq".format(data_dir) #"/rapids/data/mortgage/acq"
|
||||||
perf_data_path = "{0}/perf".format(data_dir) #"/rapids/data/mortgage/perf"
|
perf_data_path = "{0}/perf".format(data_dir) #"/rapids/data/mortgage/perf"
|
||||||
col_names_path = "{0}/names.csv".format(data_dir) # "/rapids/data/mortgage/names.csv"
|
col_names_path = "{0}/names.csv".format(data_dir) # "/rapids/data/mortgage/names.csv"
|
||||||
start_year = 2000
|
start_year = 2000
|
||||||
#end_year = 2000 # end_year is inclusive -- converted to parameter
|
#end_year = 2000 # end_year is inclusive -- converted to parameter
|
||||||
#part_count = 2 # the number of data files to train against -- converted to parameter
|
#part_count = 2 # the number of data files to train against -- converted to parameter
|
||||||
|
|
||||||
client.run(initialize_rmm_pool)
|
client.run(initialize_rmm_pool)
|
||||||
|
client
|
||||||
|
print(client.ncores())
|
||||||
# NOTE: The ETL calculates additional features which are then dropped before creating the XGBoost DMatrix.
|
# NOTE: The ETL calculates additional features which are then dropped before creating the XGBoost DMatrix.
|
||||||
# This can be optimized to avoid calculating the dropped features.
|
# This can be optimized to avoid calculating the dropped features.
|
||||||
print("Reading ...")
|
print("Reading ...")
|
||||||
t1 = datetime.datetime.now()
|
t1 = datetime.datetime.now()
|
||||||
gpu_dfs = []
|
gpu_dfs = []
|
||||||
gpu_time = 0
|
gpu_time = 0
|
||||||
quarter = 1
|
quarter = 1
|
||||||
year = start_year
|
year = start_year
|
||||||
count = 0
|
count = 0
|
||||||
while year <= end_year:
|
while year <= end_year:
|
||||||
for file in glob(os.path.join(perf_data_path + "/Performance_" + str(year) + "Q" + str(quarter) + "*")):
|
for file in glob(os.path.join(perf_data_path + "/Performance_" + str(year) + "Q" + str(quarter) + "*")):
|
||||||
if count < part_count:
|
if count < part_count:
|
||||||
gpu_dfs.append(process_quarter_gpu(year=year, quarter=quarter, perf_file=file))
|
gpu_dfs.append(process_quarter_gpu(client, col_names_path, acq_data_path, year=year, quarter=quarter, perf_file=file))
|
||||||
count += 1
|
count += 1
|
||||||
print('file: {0}'.format(file))
|
print('file: {0}'.format(file))
|
||||||
print('count: {0}'.format(count))
|
print('count: {0}'.format(count))
|
||||||
quarter += 1
|
quarter += 1
|
||||||
if quarter == 5:
|
if quarter == 5:
|
||||||
year += 1
|
year += 1
|
||||||
quarter = 1
|
quarter = 1
|
||||||
|
|
||||||
|
wait(gpu_dfs)
|
||||||
|
t2 = datetime.datetime.now()
|
||||||
|
print("Reading time ...")
|
||||||
|
print(t2-t1)
|
||||||
|
print('len(gpu_dfs) is {0}'.format(len(gpu_dfs)))
|
||||||
|
|
||||||
|
client.run(cudf._gdf.rmm_finalize)
|
||||||
|
client.run(initialize_rmm_no_pool)
|
||||||
|
client
|
||||||
|
print(client.ncores())
|
||||||
|
dxgb_gpu_params = {
|
||||||
|
'nround': 100,
|
||||||
|
'max_depth': 8,
|
||||||
|
'max_leaves': 2**8,
|
||||||
|
'alpha': 0.9,
|
||||||
|
'eta': 0.1,
|
||||||
|
'gamma': 0.1,
|
||||||
|
'learning_rate': 0.1,
|
||||||
|
'subsample': 1,
|
||||||
|
'reg_lambda': 1,
|
||||||
|
'scale_pos_weight': 2,
|
||||||
|
'min_child_weight': 30,
|
||||||
|
'tree_method': 'gpu_hist',
|
||||||
|
'n_gpus': 1,
|
||||||
|
'distributed_dask': True,
|
||||||
|
'loss': 'ls',
|
||||||
|
'objective': 'gpu:reg:linear',
|
||||||
|
'max_features': 'auto',
|
||||||
|
'criterion': 'friedman_mse',
|
||||||
|
'grow_policy': 'lossguide',
|
||||||
|
'verbose': True
|
||||||
|
}
|
||||||
|
|
||||||
|
if cpu_predictor:
|
||||||
|
print('Training using CPUs')
|
||||||
|
dxgb_gpu_params['predictor'] = 'cpu_predictor'
|
||||||
|
dxgb_gpu_params['tree_method'] = 'hist'
|
||||||
|
dxgb_gpu_params['objective'] = 'reg:linear'
|
||||||
|
|
||||||
wait(gpu_dfs)
|
|
||||||
t2 = datetime.datetime.now()
|
|
||||||
print("Reading time ...")
|
|
||||||
print(t2-t1)
|
|
||||||
print('len(gpu_dfs) is {0}'.format(len(gpu_dfs)))
|
|
||||||
|
|
||||||
client.run(cudf._gdf.rmm_finalize)
|
|
||||||
client.run(initialize_rmm_no_pool)
|
|
||||||
|
|
||||||
dxgb_gpu_params = {
|
|
||||||
'nround': 100,
|
|
||||||
'max_depth': 8,
|
|
||||||
'max_leaves': 2**8,
|
|
||||||
'alpha': 0.9,
|
|
||||||
'eta': 0.1,
|
|
||||||
'gamma': 0.1,
|
|
||||||
'learning_rate': 0.1,
|
|
||||||
'subsample': 1,
|
|
||||||
'reg_lambda': 1,
|
|
||||||
'scale_pos_weight': 2,
|
|
||||||
'min_child_weight': 30,
|
|
||||||
'tree_method': 'gpu_hist',
|
|
||||||
'n_gpus': 1,
|
|
||||||
'distributed_dask': True,
|
|
||||||
'loss': 'ls',
|
|
||||||
'objective': 'gpu:reg:linear',
|
|
||||||
'max_features': 'auto',
|
|
||||||
'criterion': 'friedman_mse',
|
|
||||||
'grow_policy': 'lossguide',
|
|
||||||
'verbose': True
|
|
||||||
}
|
|
||||||
|
|
||||||
if cpu_predictor:
|
|
||||||
print('Training using CPUs')
|
|
||||||
dxgb_gpu_params['predictor'] = 'cpu_predictor'
|
|
||||||
dxgb_gpu_params['tree_method'] = 'hist'
|
|
||||||
dxgb_gpu_params['objective'] = 'reg:linear'
|
|
||||||
|
|
||||||
else:
|
|
||||||
print('Training using GPUs')
|
|
||||||
|
|
||||||
print('Training parameters are {0}'.format(dxgb_gpu_params))
|
|
||||||
|
|
||||||
gpu_dfs = [delayed(DataFrame.from_arrow)(gpu_df) for gpu_df in gpu_dfs[:part_count]]
|
|
||||||
|
|
||||||
gpu_dfs = [gpu_df for gpu_df in gpu_dfs]
|
|
||||||
|
|
||||||
wait(gpu_dfs)
|
|
||||||
tmp_map = [(gpu_df, list(client.who_has(gpu_df).values())[0]) for gpu_df in gpu_dfs]
|
|
||||||
new_map = {}
|
|
||||||
for key, value in tmp_map:
|
|
||||||
if value not in new_map:
|
|
||||||
new_map[value] = [key]
|
|
||||||
else:
|
else:
|
||||||
new_map[value].append(key)
|
print('Training using GPUs')
|
||||||
|
|
||||||
|
print('Training parameters are {0}'.format(dxgb_gpu_params))
|
||||||
|
|
||||||
|
gpu_dfs = [delayed(DataFrame.from_arrow)(gpu_df) for gpu_df in gpu_dfs[:part_count]]
|
||||||
|
gpu_dfs = [gpu_df for gpu_df in gpu_dfs]
|
||||||
|
wait(gpu_dfs)
|
||||||
|
|
||||||
|
tmp_map = [(gpu_df, list(client.who_has(gpu_df).values())[0]) for gpu_df in gpu_dfs]
|
||||||
|
new_map = {}
|
||||||
|
for key, value in tmp_map:
|
||||||
|
if value not in new_map:
|
||||||
|
new_map[value] = [key]
|
||||||
|
else:
|
||||||
|
new_map[value].append(key)
|
||||||
|
|
||||||
|
del(tmp_map)
|
||||||
|
gpu_dfs = []
|
||||||
|
for list_delayed in new_map.values():
|
||||||
|
gpu_dfs.append(delayed(cudf.concat)(list_delayed))
|
||||||
|
|
||||||
|
del(new_map)
|
||||||
|
gpu_dfs = [(gpu_df[['delinquency_12']], gpu_df[delayed(list)(gpu_df.columns.difference(['delinquency_12']))]) for gpu_df in gpu_dfs]
|
||||||
|
gpu_dfs = [(gpu_df[0].persist(), gpu_df[1].persist()) for gpu_df in gpu_dfs]
|
||||||
|
|
||||||
|
gpu_dfs = [dask.delayed(xgb.DMatrix)(gpu_df[1], gpu_df[0]) for gpu_df in gpu_dfs]
|
||||||
|
gpu_dfs = [gpu_df.persist() for gpu_df in gpu_dfs]
|
||||||
|
gc.collect()
|
||||||
|
wait(gpu_dfs)
|
||||||
|
|
||||||
|
labels = None
|
||||||
|
t1 = datetime.datetime.now()
|
||||||
|
bst = dxgb_gpu.train(client, dxgb_gpu_params, gpu_dfs, labels, num_boost_round=dxgb_gpu_params['nround'])
|
||||||
|
t2 = datetime.datetime.now()
|
||||||
|
print("Training time ...")
|
||||||
|
print(t2-t1)
|
||||||
|
print('str(bst) is {0}'.format(str(bst)))
|
||||||
|
print('Exiting script')
|
||||||
|
|
||||||
del(tmp_map)
|
if __name__ == '__main__':
|
||||||
gpu_dfs = []
|
main()
|
||||||
for list_delayed in new_map.values():
|
|
||||||
gpu_dfs.append(delayed(cudf.concat)(list_delayed))
|
|
||||||
|
|
||||||
del(new_map)
|
|
||||||
gpu_dfs = [(gpu_df[['delinquency_12']], gpu_df[delayed(list)(gpu_df.columns.difference(['delinquency_12']))]) for gpu_df in gpu_dfs]
|
|
||||||
gpu_dfs = [(gpu_df[0].persist(), gpu_df[1].persist()) for gpu_df in gpu_dfs]
|
|
||||||
gpu_dfs = [dask.delayed(xgb.DMatrix)(gpu_df[1], gpu_df[0]) for gpu_df in gpu_dfs]
|
|
||||||
gpu_dfs = [gpu_df.persist() for gpu_df in gpu_dfs]
|
|
||||||
|
|
||||||
gc.collect()
|
|
||||||
labels = None
|
|
||||||
|
|
||||||
print('str(gpu_dfs) is {0}'.format(str(gpu_dfs)))
|
|
||||||
|
|
||||||
wait(gpu_dfs)
|
|
||||||
t1 = datetime.datetime.now()
|
|
||||||
bst = dxgb_gpu.train(client, dxgb_gpu_params, gpu_dfs, labels, num_boost_round=dxgb_gpu_params['nround'])
|
|
||||||
t2 = datetime.datetime.now()
|
|
||||||
print("Training time ...")
|
|
||||||
print(t2-t1)
|
|
||||||
print('str(bst) is {0}'.format(str(bst)))
|
|
||||||
print('Exiting script')
|
|
||||||
|
|||||||
35
contrib/RAPIDS/rapids.yml
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
name: rapids
|
||||||
|
channels:
|
||||||
|
- nvidia
|
||||||
|
- numba
|
||||||
|
- conda-forge
|
||||||
|
- rapidsai
|
||||||
|
- defaults
|
||||||
|
- pytorch
|
||||||
|
|
||||||
|
dependencies:
|
||||||
|
- arrow-cpp=0.12.0
|
||||||
|
- bokeh
|
||||||
|
- cffi=1.11.5
|
||||||
|
- cmake=3.12
|
||||||
|
- cuda92
|
||||||
|
- cython==0.29
|
||||||
|
- dask=1.1.1
|
||||||
|
- distributed=1.25.3
|
||||||
|
- faiss-gpu=1.5.0
|
||||||
|
- numba=0.42
|
||||||
|
- numpy=1.15.4
|
||||||
|
- nvstrings
|
||||||
|
- pandas=0.23.4
|
||||||
|
- pyarrow=0.12.0
|
||||||
|
- scikit-learn
|
||||||
|
- scipy
|
||||||
|
- cudf
|
||||||
|
- cuml
|
||||||
|
- python=3.6.2
|
||||||
|
- jupyterlab
|
||||||
|
- pip:
|
||||||
|
- file:/rapids/xgboost/python-package/dist/xgboost-0.81-py3-none-any.whl
|
||||||
|
- git+https://github.com/rapidsai/dask-xgboost@dask-cudf
|
||||||
|
- git+https://github.com/rapidsai/dask-cudf@master
|
||||||
|
- git+https://github.com/rapidsai/dask-cuda@master
|
||||||
@@ -1 +0,0 @@
|
|||||||
google-site-verification: googleade5d7141b3f2910.html
|
|
||||||
@@ -42,21 +42,7 @@ Below are the three execution environments supported by AutoML.
|
|||||||
## Running samples in a Local Conda environment
|
## Running samples in a Local Conda environment
|
||||||
|
|
||||||
To run these notebook on your own notebook server, use these installation instructions.
|
To run these notebook on your own notebook server, use these installation instructions.
|
||||||
|
The instructions below will install everything you need and then start a Jupyter notebook.
|
||||||
The instructions below will install everything you need and then start a Jupyter notebook. To start your Jupyter notebook manually, use:
|
|
||||||
|
|
||||||
```
|
|
||||||
conda activate azure_automl
|
|
||||||
jupyter notebook
|
|
||||||
```
|
|
||||||
|
|
||||||
or on Mac:
|
|
||||||
|
|
||||||
```
|
|
||||||
source activate azure_automl
|
|
||||||
jupyter notebook
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### 1. Install mini-conda from [here](https://conda.io/miniconda.html), choose 64-bit Python 3.7 or higher.
|
### 1. Install mini-conda from [here](https://conda.io/miniconda.html), choose 64-bit Python 3.7 or higher.
|
||||||
- **Note**: if you already have conda installed, you can keep using it but it should be version 4.4.10 or later (as shown by: conda -V). If you have a previous version installed, you can update it using the command: conda update conda.
|
- **Note**: if you already have conda installed, you can keep using it but it should be version 4.4.10 or later (as shown by: conda -V). If you have a previous version installed, you can update it using the command: conda update conda.
|
||||||
@@ -97,6 +83,21 @@ bash automl_setup_linux.sh
|
|||||||
- Please make sure you use the Python [conda env:azure_automl] kernel when trying the sample Notebooks.
|
- Please make sure you use the Python [conda env:azure_automl] kernel when trying the sample Notebooks.
|
||||||
- Follow the instructions in the individual notebooks to explore various features in AutoML
|
- Follow the instructions in the individual notebooks to explore various features in AutoML
|
||||||
|
|
||||||
|
### 6. Starting jupyter notebook manually
|
||||||
|
To start your Jupyter notebook manually, use:
|
||||||
|
|
||||||
|
```
|
||||||
|
conda activate azure_automl
|
||||||
|
jupyter notebook
|
||||||
|
```
|
||||||
|
|
||||||
|
or on Mac or Linux:
|
||||||
|
|
||||||
|
```
|
||||||
|
source activate azure_automl
|
||||||
|
jupyter notebook
|
||||||
|
```
|
||||||
|
|
||||||
<a name="samples"></a>
|
<a name="samples"></a>
|
||||||
# Automated ML SDK Sample Notebooks
|
# Automated ML SDK Sample Notebooks
|
||||||
|
|
||||||
@@ -119,7 +120,7 @@ bash automl_setup_linux.sh
|
|||||||
- Retrieving models for any iteration or logged metric
|
- Retrieving models for any iteration or logged metric
|
||||||
- Specify automl settings as kwargs
|
- Specify automl settings as kwargs
|
||||||
|
|
||||||
- [auto-ml-remote-batchai.ipynb](remote-batchai/auto-ml-remote-batchai.ipynb)
|
- [auto-ml-remote-amlcompute.ipynb](remote-batchai/auto-ml-remote-amlcompute.ipynb)
|
||||||
- Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
|
- Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
|
||||||
- Example of using automated ML for classification using remote AmlCompute for training
|
- Example of using automated ML for classification using remote AmlCompute for training
|
||||||
- Parallel execution of iterations
|
- Parallel execution of iterations
|
||||||
@@ -188,6 +189,11 @@ bash automl_setup_linux.sh
|
|||||||
- Dataset: [Dominick's grocery sales of orange juice](forecasting-b/dominicks_OJ.csv)
|
- Dataset: [Dominick's grocery sales of orange juice](forecasting-b/dominicks_OJ.csv)
|
||||||
- Example of training an AutoML forecasting model on multiple time-series
|
- Example of training an AutoML forecasting model on multiple time-series
|
||||||
|
|
||||||
|
- [auto-ml-classification-with-onnx.ipynb](classification-with-onnx/auto-ml-classification-with-onnx.ipynb)
|
||||||
|
- Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
|
||||||
|
- Simple example of using Auto ML for classification with ONNX models
|
||||||
|
- Uses local compute for training
|
||||||
|
|
||||||
<a name="documentation"></a>
|
<a name="documentation"></a>
|
||||||
See [Configure automated machine learning experiments](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-auto-train) to learn how more about the the settings and features available for automated machine learning experiments.
|
See [Configure automated machine learning experiments](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-auto-train) to learn how more about the the settings and features available for automated machine learning experiments.
|
||||||
|
|
||||||
@@ -232,6 +238,13 @@ If a sample notebook fails with an error that property, method or library does n
|
|||||||
## Numpy import fails on Windows
|
## Numpy import fails on Windows
|
||||||
Some Windows environments see an error loading numpy with the latest Python version 3.6.8. If you see this issue, try with Python version 3.6.7.
|
Some Windows environments see an error loading numpy with the latest Python version 3.6.8. If you see this issue, try with Python version 3.6.7.
|
||||||
|
|
||||||
|
## Numpy import fails
|
||||||
|
Check the tensorflow version in the automated ml conda environment. Supported versions are < 1.13. Uninstall tensorflow from the environment if version is >= 1.13
|
||||||
|
You may check the version of tensorflow and uninstall as follows
|
||||||
|
1) start a command shell, activate conda environment where automated ml packages are installed
|
||||||
|
2) enter `pip freeze` and look for `tensorflow` , if found, the version listed should be < 1.13
|
||||||
|
3) If the listed version is a not a supported version, `pip uninstall tensorflow` in the command shell and enter y for confirmation.
|
||||||
|
|
||||||
## Remote run: DsvmCompute.create fails
|
## Remote run: DsvmCompute.create fails
|
||||||
There are several reasons why the DsvmCompute.create can fail. The reason is usually in the error message but you have to look at the end of the error message for the detailed reason. Some common reasons are:
|
There are several reasons why the DsvmCompute.create can fail. The reason is usually in the error message but you have to look at the end of the error message for the detailed reason. Some common reasons are:
|
||||||
1) `Compute name is invalid, it should start with a letter, be between 2 and 16 character, and only include letters (a-zA-Z), numbers (0-9) and \'-\'.` Note that underscore is not allowed in the name.
|
1) `Compute name is invalid, it should start with a letter, be between 2 and 16 character, and only include letters (a-zA-Z), numbers (0-9) and \'-\'.` Note that underscore is not allowed in the name.
|
||||||
|
|||||||
@@ -1,21 +1,22 @@
|
|||||||
name: azure_automl
|
name: azure_automl
|
||||||
dependencies:
|
dependencies:
|
||||||
# The python interpreter version.
|
# The python interpreter version.
|
||||||
# Currently Azure ML only supports 3.5.2 and later.
|
# Currently Azure ML only supports 3.5.2 and later.
|
||||||
- python>=3.5.2,<3.6.8
|
- python>=3.5.2,<3.6.8
|
||||||
- nb_conda
|
- nb_conda
|
||||||
- matplotlib==2.1.0
|
- matplotlib==2.1.0
|
||||||
- numpy>=1.11.0,<1.15.0
|
- numpy>=1.11.0,<1.15.0
|
||||||
- cython
|
- cython
|
||||||
- urllib3<1.24
|
- urllib3<1.24
|
||||||
- scipy>=1.0.0,<=1.1.0
|
- scipy>=1.0.0,<=1.1.0
|
||||||
- scikit-learn>=0.18.0,<=0.19.1
|
- scikit-learn>=0.18.0,<=0.19.1
|
||||||
- pandas>=0.22.0,<0.23.0
|
- pandas>=0.22.0,<0.23.0
|
||||||
- tensorflow>=1.12.0
|
- tensorflow>=1.12.0
|
||||||
- py-xgboost<=0.80
|
- py-xgboost<=0.80
|
||||||
|
|
||||||
- pip:
|
- pip:
|
||||||
# Required packages for AzureML execution, history, and data preparation.
|
# Required packages for AzureML execution, history, and data preparation.
|
||||||
- azureml-sdk[automl,notebooks,explain]
|
- azureml-sdk[automl,explain]
|
||||||
- pandas_ml
|
- azureml-widgets
|
||||||
|
- pandas_ml
|
||||||
|
|
||||||
|
|||||||
@@ -1,22 +1,23 @@
|
|||||||
name: azure_automl
|
name: azure_automl
|
||||||
dependencies:
|
dependencies:
|
||||||
# The python interpreter version.
|
# The python interpreter version.
|
||||||
# Currently Azure ML only supports 3.5.2 and later.
|
# Currently Azure ML only supports 3.5.2 and later.
|
||||||
- python>=3.5.2,<3.6.8
|
- python>=3.5.2,<3.6.8
|
||||||
- nb_conda
|
- nb_conda
|
||||||
- matplotlib==2.1.0
|
- matplotlib==2.1.0
|
||||||
- numpy>=1.15.3
|
- numpy>=1.15.3
|
||||||
- cython
|
- cython
|
||||||
- urllib3<1.24
|
- urllib3<1.24
|
||||||
- scipy>=1.0.0,<=1.1.0
|
- scipy>=1.0.0,<=1.1.0
|
||||||
- scikit-learn>=0.18.0,<=0.19.1
|
- scikit-learn>=0.18.0,<=0.19.1
|
||||||
- pandas>=0.22.0,<0.23.0
|
- pandas>=0.22.0,<0.23.0
|
||||||
- tensorflow>=1.12.0
|
- tensorflow>=1.12.0
|
||||||
- py-xgboost<=0.80
|
- py-xgboost<=0.80
|
||||||
|
|
||||||
- pip:
|
- pip:
|
||||||
# Required packages for AzureML execution, history, and data preparation.
|
# Required packages for AzureML execution, history, and data preparation.
|
||||||
- azureml-sdk[automl,notebooks,explain]
|
- azureml-sdk[automl,explain]
|
||||||
- pandas_ml
|
- azureml-widgets
|
||||||
|
- pandas_ml
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,51 +1,51 @@
|
|||||||
@echo off
|
@echo off
|
||||||
set conda_env_name=%1
|
set conda_env_name=%1
|
||||||
set automl_env_file=%2
|
set automl_env_file=%2
|
||||||
set options=%3
|
set options=%3
|
||||||
set PIP_NO_WARN_SCRIPT_LOCATION=0
|
set PIP_NO_WARN_SCRIPT_LOCATION=0
|
||||||
|
|
||||||
IF "%conda_env_name%"=="" SET conda_env_name="azure_automl"
|
IF "%conda_env_name%"=="" SET conda_env_name="azure_automl"
|
||||||
IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml"
|
IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml"
|
||||||
|
|
||||||
IF NOT EXIST %automl_env_file% GOTO YmlMissing
|
IF NOT EXIST %automl_env_file% GOTO YmlMissing
|
||||||
|
|
||||||
call conda activate %conda_env_name% 2>nul:
|
call conda activate %conda_env_name% 2>nul:
|
||||||
|
|
||||||
if not errorlevel 1 (
|
if not errorlevel 1 (
|
||||||
echo Upgrading azureml-sdk[automl,notebooks,explain] in existing conda environment %conda_env_name%
|
echo Upgrading azureml-sdk[automl,notebooks,explain] in existing conda environment %conda_env_name%
|
||||||
call pip install --upgrade azureml-sdk[automl,notebooks,explain]
|
call pip install --upgrade azureml-sdk[automl,notebooks,explain]
|
||||||
if errorlevel 1 goto ErrorExit
|
if errorlevel 1 goto ErrorExit
|
||||||
) else (
|
) else (
|
||||||
call conda env create -f %automl_env_file% -n %conda_env_name%
|
call conda env create -f %automl_env_file% -n %conda_env_name%
|
||||||
)
|
)
|
||||||
|
|
||||||
call conda activate %conda_env_name% 2>nul:
|
call conda activate %conda_env_name% 2>nul:
|
||||||
if errorlevel 1 goto ErrorExit
|
if errorlevel 1 goto ErrorExit
|
||||||
|
|
||||||
call python -m ipykernel install --user --name %conda_env_name% --display-name "Python (%conda_env_name%)"
|
call python -m ipykernel install --user --name %conda_env_name% --display-name "Python (%conda_env_name%)"
|
||||||
|
|
||||||
REM azureml.widgets is now installed as part of the pip install under the conda env.
|
REM azureml.widgets is now installed as part of the pip install under the conda env.
|
||||||
REM Removing the old user install so that the notebooks will use the latest widget.
|
REM Removing the old user install so that the notebooks will use the latest widget.
|
||||||
call jupyter nbextension uninstall --user --py azureml.widgets
|
call jupyter nbextension uninstall --user --py azureml.widgets
|
||||||
|
|
||||||
echo.
|
echo.
|
||||||
echo.
|
echo.
|
||||||
echo ***************************************
|
echo ***************************************
|
||||||
echo * AutoML setup completed successfully *
|
echo * AutoML setup completed successfully *
|
||||||
echo ***************************************
|
echo ***************************************
|
||||||
IF NOT "%options%"=="nolaunch" (
|
IF NOT "%options%"=="nolaunch" (
|
||||||
echo.
|
echo.
|
||||||
echo Starting jupyter notebook - please run the configuration notebook
|
echo Starting jupyter notebook - please run the configuration notebook
|
||||||
echo.
|
echo.
|
||||||
jupyter notebook --log-level=50 --notebook-dir='..\..'
|
jupyter notebook --log-level=50 --notebook-dir='..\..'
|
||||||
)
|
)
|
||||||
|
|
||||||
goto End
|
goto End
|
||||||
|
|
||||||
:YmlMissing
|
:YmlMissing
|
||||||
echo File %automl_env_file% not found.
|
echo File %automl_env_file% not found.
|
||||||
|
|
||||||
:ErrorExit
|
:ErrorExit
|
||||||
echo Install failed
|
echo Install failed
|
||||||
|
|
||||||
:End
|
:End
|
||||||
@@ -119,7 +119,7 @@
|
|||||||
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
||||||
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
||||||
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||||
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
|
"|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
|
||||||
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
|
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -139,7 +139,6 @@
|
|||||||
" primary_metric = 'AUC_weighted',\n",
|
" primary_metric = 'AUC_weighted',\n",
|
||||||
" iteration_timeout_minutes = 20,\n",
|
" iteration_timeout_minutes = 20,\n",
|
||||||
" iterations = 10,\n",
|
" iterations = 10,\n",
|
||||||
" n_cross_validations = 2,\n",
|
|
||||||
" verbosity = logging.INFO,\n",
|
" verbosity = logging.INFO,\n",
|
||||||
" X = X_train, \n",
|
" X = X_train, \n",
|
||||||
" y = y_train,\n",
|
" y = y_train,\n",
|
||||||
@@ -263,7 +262,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. Details about retrieving the versions can be found in notebook [12.auto-ml-retrieve-the-training-sdk-versions](12.auto-ml-retrieve-the-training-sdk-versions.ipynb)."
|
"To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. The following cells create a file, myenv.yml, which specifies the dependencies from the run."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -0,0 +1,284 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Automated Machine Learning\n",
|
||||||
|
"_**Classification with Local Compute**_\n",
|
||||||
|
"\n",
|
||||||
|
"## Contents\n",
|
||||||
|
"1. [Introduction](#Introduction)\n",
|
||||||
|
"1. [Setup](#Setup)\n",
|
||||||
|
"1. [Data](#Data)\n",
|
||||||
|
"1. [Train](#Train)\n",
|
||||||
|
"1. [Results](#Results)\n",
|
||||||
|
"1. [Test](#Test)\n",
|
||||||
|
"\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Introduction\n",
|
||||||
|
"\n",
|
||||||
|
"In this example we use the scikit-learn's [digit dataset](http://scikit-learn.org/stable/datasets/index.html#optical-recognition-of-handwritten-digits-dataset) to showcase how you can use AutoML for a simple classification problem.\n",
|
||||||
|
"\n",
|
||||||
|
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
|
||||||
|
"\n",
|
||||||
|
"Please find the ONNX related documentations [here](https://github.com/onnx/onnx).\n",
|
||||||
|
"\n",
|
||||||
|
"In this notebook you will learn how to:\n",
|
||||||
|
"1. Create an `Experiment` in an existing `Workspace`.\n",
|
||||||
|
"2. Configure AutoML using `AutoMLConfig`.\n",
|
||||||
|
"3. Train the model using local compute with ONNX compatible config on.\n",
|
||||||
|
"4. Explore the results and save the ONNX model."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Setup\n",
|
||||||
|
"\n",
|
||||||
|
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import logging\n",
|
||||||
|
"\n",
|
||||||
|
"from matplotlib import pyplot as plt\n",
|
||||||
|
"import numpy as np\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"from sklearn import datasets\n",
|
||||||
|
"\n",
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core.experiment import Experiment\n",
|
||||||
|
"from azureml.core.workspace import Workspace\n",
|
||||||
|
"from azureml.train.automl import AutoMLConfig"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"\n",
|
||||||
|
"# Choose a name for the experiment and specify the project folder.\n",
|
||||||
|
"experiment_name = 'automl-classification-onnx'\n",
|
||||||
|
"project_folder = './sample_projects/automl-classification-onnx'\n",
|
||||||
|
"\n",
|
||||||
|
"experiment = Experiment(ws, experiment_name)\n",
|
||||||
|
"\n",
|
||||||
|
"output = {}\n",
|
||||||
|
"output['SDK version'] = azureml.core.VERSION\n",
|
||||||
|
"output['Subscription ID'] = ws.subscription_id\n",
|
||||||
|
"output['Workspace Name'] = ws.name\n",
|
||||||
|
"output['Resource Group'] = ws.resource_group\n",
|
||||||
|
"output['Location'] = ws.location\n",
|
||||||
|
"output['Project Directory'] = project_folder\n",
|
||||||
|
"output['Experiment Name'] = experiment.name\n",
|
||||||
|
"pd.set_option('display.max_colwidth', -1)\n",
|
||||||
|
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||||
|
"outputDf.T"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Data\n",
|
||||||
|
"\n",
|
||||||
|
"This uses scikit-learn's [load_digits](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html) method."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"digits = datasets.load_digits()\n",
|
||||||
|
"\n",
|
||||||
|
"# Exclude the first 100 rows from training so that they can be used for test.\n",
|
||||||
|
"X_train = digits.data[100:,:]\n",
|
||||||
|
"y_train = digits.target[100:]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Train with enable ONNX compatible models config on\n",
|
||||||
|
"\n",
|
||||||
|
"Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.\n",
|
||||||
|
"\n",
|
||||||
|
"Set the parameter enable_onnx_compatible_models=True, if you also want to generate the ONNX compatible models. Please note, the forecasting task and TensorFlow models are not ONNX compatible yet.\n",
|
||||||
|
"\n",
|
||||||
|
"|Property|Description|\n",
|
||||||
|
"|-|-|\n",
|
||||||
|
"|**task**|classification or regression|\n",
|
||||||
|
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
|
||||||
|
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
|
||||||
|
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
||||||
|
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||||
|
"|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
|
||||||
|
"|**enable_onnx_compatible_models**|Enable the ONNX compatible models in the experiment.|\n",
|
||||||
|
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||||
|
" debug_log = 'automl_errors.log',\n",
|
||||||
|
" primary_metric = 'AUC_weighted',\n",
|
||||||
|
" iteration_timeout_minutes = 60,\n",
|
||||||
|
" iterations = 10,\n",
|
||||||
|
" verbosity = logging.INFO,\n",
|
||||||
|
" X = X_train, \n",
|
||||||
|
" y = y_train,\n",
|
||||||
|
" enable_onnx_compatible_models=True,\n",
|
||||||
|
" path = project_folder)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.\n",
|
||||||
|
"In this example, we specify `show_output = True` to print currently running iterations to the console."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"local_run = experiment.submit(automl_config, show_output = True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"local_run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Results"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Widget for Monitoring Runs\n",
|
||||||
|
"\n",
|
||||||
|
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
|
||||||
|
"\n",
|
||||||
|
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.widgets import RunDetails\n",
|
||||||
|
"RunDetails(local_run).show() "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Retrieve the Best ONNX Model\n",
|
||||||
|
"\n",
|
||||||
|
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.\n",
|
||||||
|
"\n",
|
||||||
|
"Set the parameter return_onnx_model=True to retrieve the best ONNX model, instead of the Python model."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"best_run, onnx_mdl = local_run.get_output(return_onnx_model=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Save the best ONNX model"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.train.automl._vendor.automl.client.core.common.onnx_convert import OnnxConverter\n",
|
||||||
|
"onnx_fl_path = \"./best_model.onnx\"\n",
|
||||||
|
"OnnxConverter.save_onnx_model(onnx_mdl, onnx_fl_path)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "savitam"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.6",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python36"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.6.6"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
|
}
|
||||||
@@ -60,6 +60,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
|
"#Note: This notebook will install tensorflow if not already installed in the enviornment..\n",
|
||||||
"import logging\n",
|
"import logging\n",
|
||||||
"\n",
|
"\n",
|
||||||
"from matplotlib import pyplot as plt\n",
|
"from matplotlib import pyplot as plt\n",
|
||||||
@@ -70,6 +71,17 @@
|
|||||||
"import azureml.core\n",
|
"import azureml.core\n",
|
||||||
"from azureml.core.experiment import Experiment\n",
|
"from azureml.core.experiment import Experiment\n",
|
||||||
"from azureml.core.workspace import Workspace\n",
|
"from azureml.core.workspace import Workspace\n",
|
||||||
|
"import sys\n",
|
||||||
|
"whitelist_models=[\"LightGBM\"]\n",
|
||||||
|
"if \"3.7\" != sys.version[0:3]:\n",
|
||||||
|
" try:\n",
|
||||||
|
" import tensorflow as tf1\n",
|
||||||
|
" except ImportError:\n",
|
||||||
|
" from pip._internal import main\n",
|
||||||
|
" main(['install', 'tensorflow>=1.10.0,<=1.12.0'])\n",
|
||||||
|
" logging.getLogger().setLevel(logging.ERROR)\n",
|
||||||
|
" whitelist_models=[\"TensorFlowLinearClassifier\", \"TensorFlowDNN\"]\n",
|
||||||
|
"\n",
|
||||||
"from azureml.train.automl import AutoMLConfig"
|
"from azureml.train.automl import AutoMLConfig"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -138,7 +150,7 @@
|
|||||||
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
||||||
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
||||||
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||||
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
|
"|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
|
||||||
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|\n",
|
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|\n",
|
||||||
"|**whitelist_models**|List of models that AutoML should use. The possible values are listed [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#configure-your-experiment-settings).|"
|
"|**whitelist_models**|List of models that AutoML should use. The possible values are listed [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#configure-your-experiment-settings).|"
|
||||||
]
|
]
|
||||||
@@ -154,12 +166,11 @@
|
|||||||
" primary_metric = 'AUC_weighted',\n",
|
" primary_metric = 'AUC_weighted',\n",
|
||||||
" iteration_timeout_minutes = 60,\n",
|
" iteration_timeout_minutes = 60,\n",
|
||||||
" iterations = 10,\n",
|
" iterations = 10,\n",
|
||||||
" n_cross_validations = 3,\n",
|
|
||||||
" verbosity = logging.INFO,\n",
|
" verbosity = logging.INFO,\n",
|
||||||
" X = X_train, \n",
|
" X = X_train, \n",
|
||||||
" y = y_train,\n",
|
" y = y_train,\n",
|
||||||
" enable_tf=True,\n",
|
" enable_tf=True,\n",
|
||||||
" whitelist_models=[\"TensorFlowLinearClassifier\", \"TensorFlowDNN\"],\n",
|
" whitelist_models=whitelist_models,\n",
|
||||||
" path = project_folder)"
|
" path = project_folder)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -135,9 +135,8 @@
|
|||||||
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
|
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
|
||||||
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
|
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
|
||||||
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
||||||
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
|
||||||
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||||
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
|
"|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
|
||||||
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
|
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -152,7 +151,6 @@
|
|||||||
" primary_metric = 'AUC_weighted',\n",
|
" primary_metric = 'AUC_weighted',\n",
|
||||||
" iteration_timeout_minutes = 60,\n",
|
" iteration_timeout_minutes = 60,\n",
|
||||||
" iterations = 25,\n",
|
" iterations = 25,\n",
|
||||||
" n_cross_validations = 3,\n",
|
|
||||||
" verbosity = logging.INFO,\n",
|
" verbosity = logging.INFO,\n",
|
||||||
" X = X_train, \n",
|
" X = X_train, \n",
|
||||||
" y = y_train,\n",
|
" y = y_train,\n",
|
||||||
@@ -274,8 +272,39 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"best_run, fitted_model = local_run.get_output()\n",
|
"best_run, fitted_model = local_run.get_output()\n",
|
||||||
"print(best_run)\n",
|
"print(best_run)"
|
||||||
"print(fitted_model)"
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Print the properties of the model\n",
|
||||||
|
"The fitted_model is a python object and you can read the different properties of the object.\n",
|
||||||
|
"The following shows printing hyperparameters for each step in the pipeline."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from pprint import pprint\n",
|
||||||
|
"\n",
|
||||||
|
"def print_model(model, prefix=\"\"):\n",
|
||||||
|
" for step in model.steps:\n",
|
||||||
|
" print(prefix + step[0])\n",
|
||||||
|
" if hasattr(step[1], 'estimators') and hasattr(step[1], 'weights'):\n",
|
||||||
|
" pprint({'estimators': list(e[0] for e in step[1].estimators), 'weights': step[1].weights})\n",
|
||||||
|
" print()\n",
|
||||||
|
" for estimator in step[1].estimators:\n",
|
||||||
|
" print_model(estimator[1], estimator[0]+ ' - ')\n",
|
||||||
|
" else:\n",
|
||||||
|
" pprint(step[1].get_params())\n",
|
||||||
|
" print()\n",
|
||||||
|
" \n",
|
||||||
|
"print_model(fitted_model)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -294,8 +323,16 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"lookup_metric = \"log_loss\"\n",
|
"lookup_metric = \"log_loss\"\n",
|
||||||
"best_run, fitted_model = local_run.get_output(metric = lookup_metric)\n",
|
"best_run, fitted_model = local_run.get_output(metric = lookup_metric)\n",
|
||||||
"print(best_run)\n",
|
"print(best_run)"
|
||||||
"print(fitted_model)"
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print_model(fitted_model)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -314,8 +351,16 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"iteration = 3\n",
|
"iteration = 3\n",
|
||||||
"third_run, third_model = local_run.get_output(iteration = iteration)\n",
|
"third_run, third_model = local_run.get_output(iteration = iteration)\n",
|
||||||
"print(third_run)\n",
|
"print(third_run)"
|
||||||
"print(third_model)"
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print_model(third_model)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -195,7 +195,7 @@
|
|||||||
" dsvm_compute = DsvmCompute.create(ws, name = dsvm_name, provisioning_configuration = dsvm_config)\n",
|
" dsvm_compute = DsvmCompute.create(ws, name = dsvm_name, provisioning_configuration = dsvm_config)\n",
|
||||||
" dsvm_compute.wait_for_completion(show_output = True)\n",
|
" dsvm_compute.wait_for_completion(show_output = True)\n",
|
||||||
" print(\"Waiting one minute for ssh to be accessible\")\n",
|
" print(\"Waiting one minute for ssh to be accessible\")\n",
|
||||||
" time.sleep(60) # Wait for ssh to be accessible"
|
" time.sleep(90) # Wait for ssh to be accessible"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -211,7 +211,7 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"conda_run_config.target = dsvm_compute\n",
|
"conda_run_config.target = dsvm_compute\n",
|
||||||
"\n",
|
"\n",
|
||||||
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy'])\n",
|
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy','py-xgboost<=0.80'])\n",
|
||||||
"conda_run_config.environment.python.conda_dependencies = cd"
|
"conda_run_config.environment.python.conda_dependencies = cd"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -163,8 +163,7 @@
|
|||||||
" \"iterations\" : 2,\n",
|
" \"iterations\" : 2,\n",
|
||||||
" \"primary_metric\" : 'AUC_weighted',\n",
|
" \"primary_metric\" : 'AUC_weighted',\n",
|
||||||
" \"preprocess\" : False,\n",
|
" \"preprocess\" : False,\n",
|
||||||
" \"verbosity\" : logging.INFO,\n",
|
" \"verbosity\" : logging.INFO\n",
|
||||||
" \"n_cross_validations\": 3\n",
|
|
||||||
"}"
|
"}"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -197,9 +197,9 @@
|
|||||||
"|**iterations**|Number of iterations. In each iteration, Auto ML trains a specific pipeline on the given data|\n",
|
"|**iterations**|Number of iterations. In each iteration, Auto ML trains a specific pipeline on the given data|\n",
|
||||||
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
|
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
|
||||||
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||||
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers. |\n",
|
"|**y**|(sparse) array-like, shape = [n_samples, ], targets values.|\n",
|
||||||
"|**X_valid**|Data used to evaluate a model in a iteration. (sparse) array-like, shape = [n_samples, n_features]|\n",
|
"|**X_valid**|Data used to evaluate a model in a iteration. (sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||||
"|**y_valid**|Data used to evaluate a model in a iteration. (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers. |\n",
|
"|**y_valid**|Data used to evaluate a model in a iteration. (sparse) array-like, shape = [n_samples, ], targets values.|\n",
|
||||||
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder. "
|
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder. "
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -154,12 +154,11 @@
|
|||||||
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
|
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
|
||||||
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
|
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
|
||||||
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
||||||
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
|
||||||
"|**preprocess**|Setting this to *True* enables AutoML to perform preprocessing on the input to handle *missing data*, and to perform some common *feature extraction*.|\n",
|
"|**preprocess**|Setting this to *True* enables AutoML to perform preprocessing on the input to handle *missing data*, and to perform some common *feature extraction*.|\n",
|
||||||
"|**experiment_exit_score**|*double* value indicating the target for *primary_metric*. <br>Once the target is surpassed the run terminates.|\n",
|
"|**experiment_exit_score**|*double* value indicating the target for *primary_metric*. <br>Once the target is surpassed the run terminates.|\n",
|
||||||
"|**blacklist_models**|*List* of *strings* indicating machine learning algorithms for AutoML to avoid in this run.<br><br> Allowed values for **Classification**<br><i>LogisticRegression</i><br><i>SGD</i><br><i>MultinomialNaiveBayes</i><br><i>BernoulliNaiveBayes</i><br><i>SVM</i><br><i>LinearSVM</i><br><i>KNN</i><br><i>DecisionTree</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>GradientBoosting</i><br><i>TensorFlowDNN</i><br><i>TensorFlowLinearClassifier</i><br><br>Allowed values for **Regression**<br><i>ElasticNet</i><br><i>GradientBoosting</i><br><i>DecisionTree</i><br><i>KNN</i><br><i>LassoLars</i><br><i>SGD</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>TensorFlowLinearRegressor</i><br><i>TensorFlowDNN</i>|\n",
|
"|**blacklist_models**|*List* of *strings* indicating machine learning algorithms for AutoML to avoid in this run.<br><br> Allowed values for **Classification**<br><i>LogisticRegression</i><br><i>SGD</i><br><i>MultinomialNaiveBayes</i><br><i>BernoulliNaiveBayes</i><br><i>SVM</i><br><i>LinearSVM</i><br><i>KNN</i><br><i>DecisionTree</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>GradientBoosting</i><br><i>TensorFlowDNN</i><br><i>TensorFlowLinearClassifier</i><br><br>Allowed values for **Regression**<br><i>ElasticNet</i><br><i>GradientBoosting</i><br><i>DecisionTree</i><br><i>KNN</i><br><i>LassoLars</i><br><i>SGD</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>TensorFlowLinearRegressor</i><br><i>TensorFlowDNN</i>|\n",
|
||||||
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||||
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
|
"|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
|
||||||
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
|
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -174,7 +173,6 @@
|
|||||||
" primary_metric = 'AUC_weighted',\n",
|
" primary_metric = 'AUC_weighted',\n",
|
||||||
" iteration_timeout_minutes = 60,\n",
|
" iteration_timeout_minutes = 60,\n",
|
||||||
" iterations = 20,\n",
|
" iterations = 20,\n",
|
||||||
" n_cross_validations = 5,\n",
|
|
||||||
" preprocess = True,\n",
|
" preprocess = True,\n",
|
||||||
" experiment_exit_score = 0.9984,\n",
|
" experiment_exit_score = 0.9984,\n",
|
||||||
" blacklist_models = ['KNN','LinearSVM'],\n",
|
" blacklist_models = ['KNN','LinearSVM'],\n",
|
||||||
|
|||||||
@@ -140,9 +140,9 @@
|
|||||||
"|**max_time_sec**|Time limit in minutes for each iterations|\n",
|
"|**max_time_sec**|Time limit in minutes for each iterations|\n",
|
||||||
"|**iterations**|Number of iterations. In each iteration Auto ML trains the data with a specific pipeline|\n",
|
"|**iterations**|Number of iterations. In each iteration Auto ML trains the data with a specific pipeline|\n",
|
||||||
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||||
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers. |\n",
|
"|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
|
||||||
"|**X_valid**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
"|**X_valid**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||||
"|**y_valid**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]|\n",
|
"|**y_valid**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
|
||||||
"|**model_explainability**|Indicate to explain each trained pipeline or not |\n",
|
"|**model_explainability**|Indicate to explain each trained pipeline or not |\n",
|
||||||
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder. |"
|
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder. |"
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -137,7 +137,7 @@
|
|||||||
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
||||||
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
||||||
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||||
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
|
"|**y**|(sparse) array-like, shape = [n_samples, ], targets values.|\n",
|
||||||
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
|
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -0,0 +1,555 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Automated Machine Learning\n",
|
||||||
|
"_**Remote Execution using AmlCompute**_\n",
|
||||||
|
"\n",
|
||||||
|
"## Contents\n",
|
||||||
|
"1. [Introduction](#Introduction)\n",
|
||||||
|
"1. [Setup](#Setup)\n",
|
||||||
|
"1. [Data](#Data)\n",
|
||||||
|
"1. [Train](#Train)\n",
|
||||||
|
"1. [Results](#Results)\n",
|
||||||
|
"1. [Test](#Test)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Introduction\n",
|
||||||
|
"In this example we use the scikit-learn's [digit dataset](http://scikit-learn.org/stable/datasets/index.html#optical-recognition-of-handwritten-digits-dataset) to showcase how you can use AutoML for a simple classification problem.\n",
|
||||||
|
"\n",
|
||||||
|
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
|
||||||
|
"\n",
|
||||||
|
"In this notebook you would see\n",
|
||||||
|
"1. Create an `Experiment` in an existing `Workspace`.\n",
|
||||||
|
"2. Create or Attach existing AmlCompute to a workspace.\n",
|
||||||
|
"3. Configure AutoML using `AutoMLConfig`.\n",
|
||||||
|
"4. Train the model using AmlCompute\n",
|
||||||
|
"5. Explore the results.\n",
|
||||||
|
"6. Test the best fitted model.\n",
|
||||||
|
"\n",
|
||||||
|
"In addition this notebook showcases the following features\n",
|
||||||
|
"- **Parallel** executions for iterations\n",
|
||||||
|
"- **Asynchronous** tracking of progress\n",
|
||||||
|
"- **Cancellation** of individual iterations or the entire run\n",
|
||||||
|
"- Retrieving models for any iteration or logged metric\n",
|
||||||
|
"- Specifying AutoML settings as `**kwargs`"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Setup\n",
|
||||||
|
"\n",
|
||||||
|
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import logging\n",
|
||||||
|
"import os\n",
|
||||||
|
"import csv\n",
|
||||||
|
"\n",
|
||||||
|
"from matplotlib import pyplot as plt\n",
|
||||||
|
"import numpy as np\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"from sklearn import datasets\n",
|
||||||
|
"\n",
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core.experiment import Experiment\n",
|
||||||
|
"from azureml.core.workspace import Workspace\n",
|
||||||
|
"from azureml.train.automl import AutoMLConfig"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"\n",
|
||||||
|
"# Choose a name for the run history container in the workspace.\n",
|
||||||
|
"experiment_name = 'automl-remote-amlcompute'\n",
|
||||||
|
"project_folder = './project'\n",
|
||||||
|
"\n",
|
||||||
|
"experiment = Experiment(ws, experiment_name)\n",
|
||||||
|
"\n",
|
||||||
|
"output = {}\n",
|
||||||
|
"output['SDK version'] = azureml.core.VERSION\n",
|
||||||
|
"output['Subscription ID'] = ws.subscription_id\n",
|
||||||
|
"output['Workspace Name'] = ws.name\n",
|
||||||
|
"output['Resource Group'] = ws.resource_group\n",
|
||||||
|
"output['Location'] = ws.location\n",
|
||||||
|
"output['Project Directory'] = project_folder\n",
|
||||||
|
"output['Experiment Name'] = experiment.name\n",
|
||||||
|
"pd.set_option('display.max_colwidth', -1)\n",
|
||||||
|
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||||
|
"outputDf.T"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Create or Attach existing AmlCompute\n",
|
||||||
|
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you create `AmlCompute` as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||||
|
"\n",
|
||||||
|
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.compute import AmlCompute\n",
|
||||||
|
"from azureml.core.compute import ComputeTarget\n",
|
||||||
|
"\n",
|
||||||
|
"# Choose a name for your cluster.\n",
|
||||||
|
"amlcompute_cluster_name = \"automlcl\"\n",
|
||||||
|
"\n",
|
||||||
|
"found = False\n",
|
||||||
|
"# Check if this compute target already exists in the workspace.\n",
|
||||||
|
"cts = ws.compute_targets\n",
|
||||||
|
"if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':\n",
|
||||||
|
" found = True\n",
|
||||||
|
" print('Found existing compute target.')\n",
|
||||||
|
" compute_target = cts[amlcompute_cluster_name]\n",
|
||||||
|
" \n",
|
||||||
|
"if not found:\n",
|
||||||
|
" print('Creating a new compute target...')\n",
|
||||||
|
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\", # for GPU, use \"STANDARD_NC6\"\n",
|
||||||
|
" #vm_priority = 'lowpriority', # optional\n",
|
||||||
|
" max_nodes = 6)\n",
|
||||||
|
"\n",
|
||||||
|
" # Create the cluster.\n",
|
||||||
|
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)\n",
|
||||||
|
" \n",
|
||||||
|
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
|
||||||
|
" # If no min_node_count is provided, it will use the scale settings for the cluster.\n",
|
||||||
|
" compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)\n",
|
||||||
|
" \n",
|
||||||
|
" # For a more detailed view of current AmlCompute status, use get_status()."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Data\n",
|
||||||
|
"For remote executions, you need to make the data accessible from the remote compute.\n",
|
||||||
|
"This can be done by uploading the data to DataStore.\n",
|
||||||
|
"In this example, we upload scikit-learn's [load_digits](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html) data."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"data_train = datasets.load_digits()\n",
|
||||||
|
"\n",
|
||||||
|
"if not os.path.isdir('data'):\n",
|
||||||
|
" os.mkdir('data')\n",
|
||||||
|
" \n",
|
||||||
|
"if not os.path.exists(project_folder):\n",
|
||||||
|
" os.makedirs(project_folder)\n",
|
||||||
|
" \n",
|
||||||
|
"pd.DataFrame(data_train.data).to_csv(\"data/X_train.tsv\", index=False, header=False, quoting=csv.QUOTE_ALL, sep=\"\\t\")\n",
|
||||||
|
"pd.DataFrame(data_train.target).to_csv(\"data/y_train.tsv\", index=False, header=False, sep=\"\\t\")\n",
|
||||||
|
"\n",
|
||||||
|
"ds = ws.get_default_datastore()\n",
|
||||||
|
"ds.upload(src_dir='./data', target_path='bai_data', overwrite=True, show_progress=True)\n",
|
||||||
|
"\n",
|
||||||
|
"from azureml.core.runconfig import DataReferenceConfiguration\n",
|
||||||
|
"dr = DataReferenceConfiguration(datastore_name=ds.name, \n",
|
||||||
|
" path_on_datastore='bai_data', \n",
|
||||||
|
" path_on_compute='/tmp/azureml_runs',\n",
|
||||||
|
" mode='download', # download files from datastore to compute target\n",
|
||||||
|
" overwrite=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.runconfig import RunConfiguration\n",
|
||||||
|
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||||
|
"\n",
|
||||||
|
"# create a new RunConfig object\n",
|
||||||
|
"conda_run_config = RunConfiguration(framework=\"python\")\n",
|
||||||
|
"\n",
|
||||||
|
"# Set compute target to AmlCompute\n",
|
||||||
|
"conda_run_config.target = compute_target\n",
|
||||||
|
"conda_run_config.environment.docker.enabled = True\n",
|
||||||
|
"conda_run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE\n",
|
||||||
|
"\n",
|
||||||
|
"# set the data reference of the run coonfiguration\n",
|
||||||
|
"conda_run_config.data_references = {ds.name: dr}\n",
|
||||||
|
"\n",
|
||||||
|
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy','py-xgboost<=0.80'])\n",
|
||||||
|
"conda_run_config.environment.python.conda_dependencies = cd"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%writefile $project_folder/get_data.py\n",
|
||||||
|
"\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"\n",
|
||||||
|
"def get_data():\n",
|
||||||
|
" X_train = pd.read_csv(\"/tmp/azureml_runs/bai_data/X_train.tsv\", delimiter=\"\\t\", header=None, quotechar='\"')\n",
|
||||||
|
" y_train = pd.read_csv(\"/tmp/azureml_runs/bai_data/y_train.tsv\", delimiter=\"\\t\", header=None, quotechar='\"')\n",
|
||||||
|
"\n",
|
||||||
|
" return { \"X\" : X_train.values, \"y\" : y_train[0].values }\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Train\n",
|
||||||
|
"\n",
|
||||||
|
"You can specify `automl_settings` as `**kwargs` as well. Also note that you can use a `get_data()` function for local excutions too.\n",
|
||||||
|
"\n",
|
||||||
|
"**Note:** When using AmlCompute, you can't pass Numpy arrays directly to the fit method.\n",
|
||||||
|
"\n",
|
||||||
|
"|Property|Description|\n",
|
||||||
|
"|-|-|\n",
|
||||||
|
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
|
||||||
|
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
|
||||||
|
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
||||||
|
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
||||||
|
"|**max_concurrent_iterations**|Maximum number of iterations that would be executed in parallel. This should be less than the number of cores on the DSVM.|"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"automl_settings = {\n",
|
||||||
|
" \"iteration_timeout_minutes\": 10,\n",
|
||||||
|
" \"iterations\": 20,\n",
|
||||||
|
" \"n_cross_validations\": 5,\n",
|
||||||
|
" \"primary_metric\": 'AUC_weighted',\n",
|
||||||
|
" \"preprocess\": False,\n",
|
||||||
|
" \"max_concurrent_iterations\": 5,\n",
|
||||||
|
" \"verbosity\": logging.INFO\n",
|
||||||
|
"}\n",
|
||||||
|
"\n",
|
||||||
|
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||||
|
" debug_log = 'automl_errors.log',\n",
|
||||||
|
" path = project_folder,\n",
|
||||||
|
" run_configuration=conda_run_config,\n",
|
||||||
|
" data_script = project_folder + \"/get_data.py\",\n",
|
||||||
|
" **automl_settings\n",
|
||||||
|
" )\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Call the `submit` method on the experiment object and pass the run configuration. For remote runs the execution is asynchronous, so you will see the iterations get populated as they complete. You can interact with the widgets and models even when the experiment is running to retrieve the best model up to that point. Once you are satisfied with the model, you can cancel a particular iteration or the whole run.\n",
|
||||||
|
"In this example, we specify `show_output = False` to suppress console output while the run is in progress."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"remote_run = experiment.submit(automl_config, show_output = False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"remote_run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Results\n",
|
||||||
|
"\n",
|
||||||
|
"#### Loading executed runs\n",
|
||||||
|
"In case you need to load a previously executed run, enable the cell below and replace the `run_id` value."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "raw",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"remote_run = AutoMLRun(experiment = experiment, run_id = 'AutoML_5db13491-c92a-4f1d-b622-8ab8d973a058')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Widget for Monitoring Runs\n",
|
||||||
|
"\n",
|
||||||
|
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
|
||||||
|
"\n",
|
||||||
|
"You can click on a pipeline to see run properties and output logs. Logs are also available on the DSVM under `/tmp/azureml_run/{iterationid}/azureml-logs`\n",
|
||||||
|
"\n",
|
||||||
|
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"remote_run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.widgets import RunDetails\n",
|
||||||
|
"RunDetails(remote_run).show() "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Wait until the run finishes.\n",
|
||||||
|
"remote_run.wait_for_completion(show_output = True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"\n",
|
||||||
|
"#### Retrieve All Child Runs\n",
|
||||||
|
"You can also use SDK methods to fetch all the child runs and see individual metrics that we log."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"children = list(remote_run.get_children())\n",
|
||||||
|
"metricslist = {}\n",
|
||||||
|
"for run in children:\n",
|
||||||
|
" properties = run.get_properties()\n",
|
||||||
|
" metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}\n",
|
||||||
|
" metricslist[int(properties['iteration'])] = metrics\n",
|
||||||
|
"\n",
|
||||||
|
"rundata = pd.DataFrame(metricslist).sort_index(1)\n",
|
||||||
|
"rundata"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Cancelling Runs\n",
|
||||||
|
"\n",
|
||||||
|
"You can cancel ongoing remote runs using the `cancel` and `cancel_iteration` functions."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Cancel the ongoing experiment and stop scheduling new iterations.\n",
|
||||||
|
"# remote_run.cancel()\n",
|
||||||
|
"\n",
|
||||||
|
"# Cancel iteration 1 and move onto iteration 2.\n",
|
||||||
|
"# remote_run.cancel_iteration(1)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Retrieve the Best Model\n",
|
||||||
|
"\n",
|
||||||
|
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"best_run, fitted_model = remote_run.get_output()\n",
|
||||||
|
"print(best_run)\n",
|
||||||
|
"print(fitted_model)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Best Model Based on Any Other Metric\n",
|
||||||
|
"Show the run and the model which has the smallest `log_loss` value:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"lookup_metric = \"log_loss\"\n",
|
||||||
|
"best_run, fitted_model = remote_run.get_output(metric = lookup_metric)\n",
|
||||||
|
"print(best_run)\n",
|
||||||
|
"print(fitted_model)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Model from a Specific Iteration\n",
|
||||||
|
"Show the run and the model from the third iteration:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"iteration = 3\n",
|
||||||
|
"third_run, third_model = remote_run.get_output(iteration=iteration)\n",
|
||||||
|
"print(third_run)\n",
|
||||||
|
"print(third_model)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Test\n",
|
||||||
|
"\n",
|
||||||
|
"#### Load Test Data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"digits = datasets.load_digits()\n",
|
||||||
|
"X_test = digits.data[:10, :]\n",
|
||||||
|
"y_test = digits.target[:10]\n",
|
||||||
|
"images = digits.images[:10]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Testing Our Best Fitted Model"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Randomly select digits and test.\n",
|
||||||
|
"for index in np.random.choice(len(y_test), 2, replace = False):\n",
|
||||||
|
" print(index)\n",
|
||||||
|
" predicted = fitted_model.predict(X_test[index:index + 1])[0]\n",
|
||||||
|
" label = y_test[index]\n",
|
||||||
|
" title = \"Label value = %d Predicted value = %d \" % (label, predicted)\n",
|
||||||
|
" fig = plt.figure(1, figsize=(3,3))\n",
|
||||||
|
" ax1 = fig.add_axes((0,0,.8,.8))\n",
|
||||||
|
" ax1.set_title(title)\n",
|
||||||
|
" plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')\n",
|
||||||
|
" plt.show()"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "savitam"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.6",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python36"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.6.6"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
|
}
|
||||||
@@ -167,7 +167,7 @@
|
|||||||
"# Set compute target to the Linux DSVM\n",
|
"# Set compute target to the Linux DSVM\n",
|
||||||
"conda_run_config.target = dsvm_compute\n",
|
"conda_run_config.target = dsvm_compute\n",
|
||||||
"\n",
|
"\n",
|
||||||
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy'])\n",
|
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy','py-xgboost<=0.80'])\n",
|
||||||
"conda_run_config.environment.python.conda_dependencies = cd"
|
"conda_run_config.environment.python.conda_dependencies = cd"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -128,7 +128,7 @@
|
|||||||
" dsvm_compute = DsvmCompute.create(ws, name=compute_target_name, provisioning_configuration=dsvm_config)\n",
|
" dsvm_compute = DsvmCompute.create(ws, name=compute_target_name, provisioning_configuration=dsvm_config)\n",
|
||||||
" dsvm_compute.wait_for_completion(show_output=True)\n",
|
" dsvm_compute.wait_for_completion(show_output=True)\n",
|
||||||
" print(\"Waiting one minute for ssh to be accessible\")\n",
|
" print(\"Waiting one minute for ssh to be accessible\")\n",
|
||||||
" time.sleep(60) # Wait for ssh to be accessible"
|
" time.sleep(90) # Wait for ssh to be accessible"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -254,7 +254,7 @@
|
|||||||
"# set the data reference of the run coonfiguration\n",
|
"# set the data reference of the run coonfiguration\n",
|
||||||
"conda_run_config.data_references = {ds.name: dr}\n",
|
"conda_run_config.data_references = {ds.name: dr}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy'])\n",
|
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy','py-xgboost<=0.80'])\n",
|
||||||
"conda_run_config.environment.python.conda_dependencies = cd"
|
"conda_run_config.environment.python.conda_dependencies = cd"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -193,7 +193,7 @@
|
|||||||
"# set the data reference of the run coonfiguration\n",
|
"# set the data reference of the run coonfiguration\n",
|
||||||
"conda_run_config.data_references = {ds.name: dr}\n",
|
"conda_run_config.data_references = {ds.name: dr}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy'])\n",
|
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy','py-xgboost<=0.80'])\n",
|
||||||
"conda_run_config.environment.python.conda_dependencies = cd"
|
"conda_run_config.environment.python.conda_dependencies = cd"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -156,9 +156,9 @@
|
|||||||
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
||||||
"|**preprocess**|Setting this to *True* enables AutoML to perform preprocessing on the input to handle *missing data*, and to perform some common *feature extraction*.<br>**Note:** If input data is sparse, you cannot use *True*.|\n",
|
"|**preprocess**|Setting this to *True* enables AutoML to perform preprocessing on the input to handle *missing data*, and to perform some common *feature extraction*.<br>**Note:** If input data is sparse, you cannot use *True*.|\n",
|
||||||
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||||
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
|
"|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
|
||||||
"|**X_valid**|(sparse) array-like, shape = [n_samples, n_features] for the custom validation set.|\n",
|
"|**X_valid**|(sparse) array-like, shape = [n_samples, n_features] for the custom validation set.|\n",
|
||||||
"|**y_valid**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification for the custom validation set.|\n",
|
"|**y_valid**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
|
||||||
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
|
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ Notebook 6 is an Automated ML sample notebook for Classification.
|
|||||||
Learn more about [how to use Azure Databricks as a development environment](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment#azure-databricks) for Azure Machine Learning service.
|
Learn more about [how to use Azure Databricks as a development environment](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment#azure-databricks) for Azure Machine Learning service.
|
||||||
|
|
||||||
**Databricks as a Compute Target from AML Pipelines**
|
**Databricks as a Compute Target from AML Pipelines**
|
||||||
You can use Azure Databricks as a compute target from [Azure Machine Learning Pipelines](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines). Take a look at this notebook for details: [aml-pipelines-use-databricks-as-compute-target.ipynb](aml-pipelines-use-databricks-as-compute-target.ipynb).
|
You can use Azure Databricks as a compute target from [Azure Machine Learning Pipelines](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines). Take a look at this notebook for details: [aml-pipelines-use-databricks-as-compute-target.ipynb](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/aml-pipelines-use-databricks-as-compute-target.ipynb).
|
||||||
|
|
||||||
For more on SDK concepts, please refer to [notebooks](https://github.com/Azure/MachineLearningNotebooks).
|
For more on SDK concepts, please refer to [notebooks](https://github.com/Azure/MachineLearningNotebooks).
|
||||||
|
|
||||||
|
|||||||
@@ -1,714 +0,0 @@
|
|||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
|
||||||
"Licensed under the MIT License."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# Using Databricks as a Compute Target from Azure Machine Learning Pipeline\n",
|
|
||||||
"To use Databricks as a compute target from [Azure Machine Learning Pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines), a [DatabricksStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.databricks_step.databricksstep?view=azure-ml-py) is used. This notebook demonstrates the use of DatabricksStep in Azure Machine Learning Pipeline.\n",
|
|
||||||
"\n",
|
|
||||||
"The notebook will show:\n",
|
|
||||||
"1. Running an arbitrary Databricks notebook that the customer has in Databricks workspace\n",
|
|
||||||
"2. Running an arbitrary Python script that the customer has in DBFS\n",
|
|
||||||
"3. Running an arbitrary Python script that is available on local computer (will upload to DBFS, and then run in Databricks) \n",
|
|
||||||
"4. Running a JAR job that the customer has in DBFS.\n",
|
|
||||||
"\n",
|
|
||||||
"## Before you begin:\n",
|
|
||||||
"\n",
|
|
||||||
"1. **Create an Azure Databricks workspace** in the same subscription where you have your Azure Machine Learning workspace. You will need details of this workspace later on to define DatabricksStep. [Click here](https://ms.portal.azure.com/#blade/HubsExtension/Resources/resourceType/Microsoft.Databricks%2Fworkspaces) for more information.\n",
|
|
||||||
"2. **Create PAT (access token)**: Manually create a Databricks access token at the Azure Databricks portal. See [this](https://docs.databricks.com/api/latest/authentication.html#generate-a-token) for more information.\n",
|
|
||||||
"3. **Add demo notebook to ADB**: This notebook has a sample you can use as is. Launch Azure Databricks attached to your Azure Machine Learning workspace and add a new notebook. \n",
|
|
||||||
"4. **Create/attach a Blob storage** for use from ADB"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Add demo notebook to ADB Workspace\n",
|
|
||||||
"Copy and paste the below code to create a new notebook in your ADB workspace."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"```python\n",
|
|
||||||
"# direct access\n",
|
|
||||||
"dbutils.widgets.get(\"myparam\")\n",
|
|
||||||
"p = getArgument(\"myparam\")\n",
|
|
||||||
"print (\"Param -\\'myparam':\")\n",
|
|
||||||
"print (p)\n",
|
|
||||||
"\n",
|
|
||||||
"dbutils.widgets.get(\"input\")\n",
|
|
||||||
"i = getArgument(\"input\")\n",
|
|
||||||
"print (\"Param -\\'input':\")\n",
|
|
||||||
"print (i)\n",
|
|
||||||
"\n",
|
|
||||||
"dbutils.widgets.get(\"output\")\n",
|
|
||||||
"o = getArgument(\"output\")\n",
|
|
||||||
"print (\"Param -\\'output':\")\n",
|
|
||||||
"print (o)\n",
|
|
||||||
"\n",
|
|
||||||
"n = i + \"/testdata.txt\"\n",
|
|
||||||
"df = spark.read.csv(n)\n",
|
|
||||||
"\n",
|
|
||||||
"display (df)\n",
|
|
||||||
"\n",
|
|
||||||
"data = [('value1', 'value2')]\n",
|
|
||||||
"df2 = spark.createDataFrame(data)\n",
|
|
||||||
"\n",
|
|
||||||
"z = o + \"/output.txt\"\n",
|
|
||||||
"df2.write.csv(z)\n",
|
|
||||||
"```"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Azure Machine Learning and Pipeline SDK-specific imports"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import os\n",
|
|
||||||
"import azureml.core\n",
|
|
||||||
"from azureml.core.runconfig import JarLibrary\n",
|
|
||||||
"from azureml.core.compute import ComputeTarget, DatabricksCompute\n",
|
|
||||||
"from azureml.exceptions import ComputeTargetException\n",
|
|
||||||
"from azureml.core import Workspace, Experiment\n",
|
|
||||||
"from azureml.pipeline.core import Pipeline, PipelineData\n",
|
|
||||||
"from azureml.pipeline.steps import DatabricksStep\n",
|
|
||||||
"from azureml.core.datastore import Datastore\n",
|
|
||||||
"from azureml.data.data_reference import DataReference\n",
|
|
||||||
"\n",
|
|
||||||
"# Check core SDK version number\n",
|
|
||||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Initialize Workspace\n",
|
|
||||||
"\n",
|
|
||||||
"Initialize a workspace object from persisted configuration. Make sure the config file is present at .\\config.json"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"ws = Workspace.from_config()\n",
|
|
||||||
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Attach Databricks compute target\n",
|
|
||||||
"Next, you need to add your Databricks workspace to Azure Machine Learning as a compute target and give it a name. You will use this name to refer to your Databricks workspace compute target inside Azure Machine Learning.\n",
|
|
||||||
"\n",
|
|
||||||
"- **Resource Group** - The resource group name of your Azure Machine Learning workspace\n",
|
|
||||||
"- **Databricks Workspace Name** - The workspace name of your Azure Databricks workspace\n",
|
|
||||||
"- **Databricks Access Token** - The access token you created in ADB\n",
|
|
||||||
"\n",
|
|
||||||
"**The Databricks workspace need to be present in the same subscription as your AML workspace**"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Replace with your account info before running.\n",
|
|
||||||
" \n",
|
|
||||||
"db_compute_name=os.getenv(\"DATABRICKS_COMPUTE_NAME\", \"<my-databricks-compute-name>\") # Databricks compute name\n",
|
|
||||||
"db_resource_group=os.getenv(\"DATABRICKS_RESOURCE_GROUP\", \"<my-db-resource-group>\") # Databricks resource group\n",
|
|
||||||
"db_workspace_name=os.getenv(\"DATABRICKS_WORKSPACE_NAME\", \"<my-db-workspace-name>\") # Databricks workspace name\n",
|
|
||||||
"db_access_token=os.getenv(\"DATABRICKS_ACCESS_TOKEN\", \"<my-access-token>\") # Databricks access token\n",
|
|
||||||
" \n",
|
|
||||||
"try:\n",
|
|
||||||
" databricks_compute = DatabricksCompute(workspace=ws, name=db_compute_name)\n",
|
|
||||||
" print('Compute target {} already exists'.format(db_compute_name))\n",
|
|
||||||
"except ComputeTargetException:\n",
|
|
||||||
" print('Compute not found, will use below parameters to attach new one')\n",
|
|
||||||
" print('db_compute_name {}'.format(db_compute_name))\n",
|
|
||||||
" print('db_resource_group {}'.format(db_resource_group))\n",
|
|
||||||
" print('db_workspace_name {}'.format(db_workspace_name))\n",
|
|
||||||
" print('db_access_token {}'.format(db_access_token))\n",
|
|
||||||
" \n",
|
|
||||||
" config = DatabricksCompute.attach_configuration(\n",
|
|
||||||
" resource_group = db_resource_group,\n",
|
|
||||||
" workspace_name = db_workspace_name,\n",
|
|
||||||
" access_token= db_access_token)\n",
|
|
||||||
" databricks_compute=ComputeTarget.attach(ws, db_compute_name, config)\n",
|
|
||||||
" databricks_compute.wait_for_completion(True)\n"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Data Connections with Inputs and Outputs\n",
|
|
||||||
"The DatabricksStep supports Azure Bloband ADLS for inputs and outputs. You also will need to define a [Secrets](https://docs.azuredatabricks.net/user-guide/secrets/index.html) scope to enable authentication to external data sources such as Blob and ADLS from Databricks.\n",
|
|
||||||
"\n",
|
|
||||||
"- Databricks documentation on [Azure Blob](https://docs.azuredatabricks.net/spark/latest/data-sources/azure/azure-storage.html)\n",
|
|
||||||
"- Databricks documentation on [ADLS](https://docs.databricks.com/spark/latest/data-sources/azure/azure-datalake.html)\n",
|
|
||||||
"\n",
|
|
||||||
"### Type of Data Access\n",
|
|
||||||
"Databricks allows to interact with Azure Blob and ADLS in two ways.\n",
|
|
||||||
"- **Direct Access**: Databricks allows you to interact with Azure Blob or ADLS URIs directly. The input or output URIs will be mapped to a Databricks widget param in the Databricks notebook.\n",
|
|
||||||
"- **Mounting**: You will be supplied with additional parameters and secrets that will enable you to mount your ADLS or Azure Blob input or output location in your Databricks notebook."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Direct Access: Python sample code\n",
|
|
||||||
"If you have a data reference named \"input\" it will represent the URI of the input and you can access it directly in the Databricks python notebook like so:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"```python\n",
|
|
||||||
"dbutils.widgets.get(\"input\")\n",
|
|
||||||
"y = getArgument(\"input\")\n",
|
|
||||||
"df = spark.read.csv(y)\n",
|
|
||||||
"```"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Mounting: Python sample code for Azure Blob\n",
|
|
||||||
"Given an Azure Blob data reference named \"input\" the following widget params will be made available in the Databricks notebook:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"```python\n",
|
|
||||||
"# This contains the input URI\n",
|
|
||||||
"dbutils.widgets.get(\"input\")\n",
|
|
||||||
"myinput_uri = getArgument(\"input\")\n",
|
|
||||||
"\n",
|
|
||||||
"# How to get the input datastore name inside ADB notebook\n",
|
|
||||||
"# This contains the name of a Databricks secret (in the predefined \"amlscope\" secret scope) \n",
|
|
||||||
"# that contians an access key or sas for the Azure Blob input (this name is obtained by appending \n",
|
|
||||||
"# the name of the input with \"_blob_secretname\". \n",
|
|
||||||
"dbutils.widgets.get(\"input_blob_secretname\") \n",
|
|
||||||
"myinput_blob_secretname = getArgument(\"input_blob_secretname\")\n",
|
|
||||||
"\n",
|
|
||||||
"# This contains the required configuration for mounting\n",
|
|
||||||
"dbutils.widgets.get(\"input_blob_config\")\n",
|
|
||||||
"myinput_blob_config = getArgument(\"input_blob_config\")\n",
|
|
||||||
"\n",
|
|
||||||
"# Usage\n",
|
|
||||||
"dbutils.fs.mount(\n",
|
|
||||||
" source = myinput_uri,\n",
|
|
||||||
" mount_point = \"/mnt/input\",\n",
|
|
||||||
" extra_configs = {myinput_blob_config:dbutils.secrets.get(scope = \"amlscope\", key = myinput_blob_secretname)})\n",
|
|
||||||
"```"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Mounting: Python sample code for ADLS\n",
|
|
||||||
"Given an ADLS data reference named \"input\" the following widget params will be made available in the Databricks notebook:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"```python\n",
|
|
||||||
"# This contains the input URI\n",
|
|
||||||
"dbutils.widgets.get(\"input\") \n",
|
|
||||||
"myinput_uri = getArgument(\"input\")\n",
|
|
||||||
"\n",
|
|
||||||
"# This contains the client id for the service principal \n",
|
|
||||||
"# that has access to the adls input\n",
|
|
||||||
"dbutils.widgets.get(\"input_adls_clientid\") \n",
|
|
||||||
"myinput_adls_clientid = getArgument(\"input_adls_clientid\")\n",
|
|
||||||
"\n",
|
|
||||||
"# This contains the name of a Databricks secret (in the predefined \"amlscope\" secret scope) \n",
|
|
||||||
"# that contains the secret for the above mentioned service principal\n",
|
|
||||||
"dbutils.widgets.get(\"input_adls_secretname\") \n",
|
|
||||||
"myinput_adls_secretname = getArgument(\"input_adls_secretname\")\n",
|
|
||||||
"\n",
|
|
||||||
"# This contains the refresh url for the mounting configs\n",
|
|
||||||
"dbutils.widgets.get(\"input_adls_refresh_url\") \n",
|
|
||||||
"myinput_adls_refresh_url = getArgument(\"input_adls_refresh_url\")\n",
|
|
||||||
"\n",
|
|
||||||
"# Usage \n",
|
|
||||||
"configs = {\"dfs.adls.oauth2.access.token.provider.type\": \"ClientCredential\",\n",
|
|
||||||
" \"dfs.adls.oauth2.client.id\": myinput_adls_clientid,\n",
|
|
||||||
" \"dfs.adls.oauth2.credential\": dbutils.secrets.get(scope = \"amlscope\", key =myinput_adls_secretname),\n",
|
|
||||||
" \"dfs.adls.oauth2.refresh.url\": myinput_adls_refresh_url}\n",
|
|
||||||
"\n",
|
|
||||||
"dbutils.fs.mount(\n",
|
|
||||||
" source = myinput_uri,\n",
|
|
||||||
" mount_point = \"/mnt/output\",\n",
|
|
||||||
" extra_configs = configs)\n",
|
|
||||||
"```"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Use Databricks from Azure Machine Learning Pipeline\n",
|
|
||||||
"To use Databricks as a compute target from Azure Machine Learning Pipeline, a DatabricksStep is used. Let's define a datasource (via DataReference) and intermediate data (via PipelineData) to be used in DatabricksStep."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Use the default blob storage\n",
|
|
||||||
"def_blob_store = Datastore(ws, \"workspaceblobstore\")\n",
|
|
||||||
"print('Datastore {} will be used'.format(def_blob_store.name))\n",
|
|
||||||
"\n",
|
|
||||||
"# We are uploading a sample file in the local directory to be used as a datasource\n",
|
|
||||||
"def_blob_store.upload_files(files=[\"./testdata.txt\"], target_path=\"dbtest\", overwrite=False)\n",
|
|
||||||
"\n",
|
|
||||||
"step_1_input = DataReference(datastore=def_blob_store, path_on_datastore=\"dbtest\",\n",
|
|
||||||
" data_reference_name=\"input\")\n",
|
|
||||||
"\n",
|
|
||||||
"step_1_output = PipelineData(\"output\", datastore=def_blob_store)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Add a DatabricksStep\n",
|
|
||||||
"Adds a Databricks notebook as a step in a Pipeline.\n",
|
|
||||||
"- ***name:** Name of the Module\n",
|
|
||||||
"- **inputs:** List of input connections for data consumed by this step. Fetch this inside the notebook using dbutils.widgets.get(\"input\")\n",
|
|
||||||
"- **outputs:** List of output port definitions for outputs produced by this step. Fetch this inside the notebook using dbutils.widgets.get(\"output\")\n",
|
|
||||||
"- **existing_cluster_id:** Cluster ID of an existing Interactive cluster on the Databricks workspace. If you are providing this, do not provide any of the parameters below that are used to create a new cluster such as spark_version, node_type, etc.\n",
|
|
||||||
"- **spark_version:** Version of spark for the databricks run cluster. default value: 4.0.x-scala2.11\n",
|
|
||||||
"- **node_type:** Azure vm node types for the databricks run cluster. default value: Standard_D3_v2\n",
|
|
||||||
"- **num_workers:** Specifies a static number of workers for the databricks run cluster\n",
|
|
||||||
"- **min_workers:** Specifies a min number of workers to use for auto-scaling the databricks run cluster\n",
|
|
||||||
"- **max_workers:** Specifies a max number of workers to use for auto-scaling the databricks run cluster\n",
|
|
||||||
"- **spark_env_variables:** Spark environment variables for the databricks run cluster (dictionary of {str:str}). default value: {'PYSPARK_PYTHON': '/databricks/python3/bin/python3'}\n",
|
|
||||||
"- **notebook_path:** Path to the notebook in the databricks instance. If you are providing this, do not provide python script related paramaters or JAR related parameters.\n",
|
|
||||||
"- **notebook_params:** Parameters for the databricks notebook (dictionary of {str:str}). Fetch this inside the notebook using dbutils.widgets.get(\"myparam\")\n",
|
|
||||||
"- **python_script_path:** The path to the python script in the DBFS or S3. If you are providing this, do not provide python_script_name which is used for uploading script from local machine.\n",
|
|
||||||
"- **python_script_params:** Parameters for the python script (list of str)\n",
|
|
||||||
"- **main_class_name:** The name of the entry point in a JAR module. If you are providing this, do not provide any python script or notebook related parameters.\n",
|
|
||||||
"- **jar_params:** Parameters for the JAR module (list of str)\n",
|
|
||||||
"- **python_script_name:** name of a python script on your local machine (relative to source_directory). If you are providing this do not provide python_script_path which is used to execute a remote python script; or any of the JAR or notebook related parameters.\n",
|
|
||||||
"- **source_directory:** folder that contains the script and other files\n",
|
|
||||||
"- **hash_paths:** list of paths to hash to detect a change in source_directory (script file is always hashed)\n",
|
|
||||||
"- **run_name:** Name in databricks for this run\n",
|
|
||||||
"- **timeout_seconds:** Timeout for the databricks run\n",
|
|
||||||
"- **runconfig:** Runconfig to use. Either pass runconfig or each library type as a separate parameter but do not mix the two\n",
|
|
||||||
"- **maven_libraries:** maven libraries for the databricks run\n",
|
|
||||||
"- **pypi_libraries:** pypi libraries for the databricks run\n",
|
|
||||||
"- **egg_libraries:** egg libraries for the databricks run\n",
|
|
||||||
"- **jar_libraries:** jar libraries for the databricks run\n",
|
|
||||||
"- **rcran_libraries:** rcran libraries for the databricks run\n",
|
|
||||||
"- **compute_target:** Azure Databricks compute\n",
|
|
||||||
"- **allow_reuse:** Whether the step should reuse previous results when run with the same settings/inputs\n",
|
|
||||||
"- **version:** Optional version tag to denote a change in functionality for the step\n",
|
|
||||||
"\n",
|
|
||||||
"\\* *denotes required fields* \n",
|
|
||||||
"*You must provide exactly one of num_workers or min_workers and max_workers paramaters* \n",
|
|
||||||
"*You must provide exactly one of databricks_compute or databricks_compute_name parameters*\n",
|
|
||||||
"\n",
|
|
||||||
"## Use runconfig to specify library dependencies\n",
|
|
||||||
"You can use a runconfig to specify the library dependencies for your cluster in Databricks. The runconfig will contain a databricks section as follows:\n",
|
|
||||||
"\n",
|
|
||||||
"```yaml\n",
|
|
||||||
"environment:\n",
|
|
||||||
"# Databricks details\n",
|
|
||||||
" databricks:\n",
|
|
||||||
"# List of maven libraries.\n",
|
|
||||||
" mavenLibraries:\n",
|
|
||||||
" - coordinates: org.jsoup:jsoup:1.7.1\n",
|
|
||||||
" repo: ''\n",
|
|
||||||
" exclusions:\n",
|
|
||||||
" - slf4j:slf4j\n",
|
|
||||||
" - '*:hadoop-client'\n",
|
|
||||||
"# List of PyPi libraries\n",
|
|
||||||
" pypiLibraries:\n",
|
|
||||||
" - package: beautifulsoup4\n",
|
|
||||||
" repo: ''\n",
|
|
||||||
"# List of RCran libraries\n",
|
|
||||||
" rcranLibraries:\n",
|
|
||||||
" -\n",
|
|
||||||
"# Coordinates.\n",
|
|
||||||
" package: ada\n",
|
|
||||||
"# Repo\n",
|
|
||||||
" repo: http://cran.us.r-project.org\n",
|
|
||||||
"# List of JAR libraries\n",
|
|
||||||
" jarLibraries:\n",
|
|
||||||
" -\n",
|
|
||||||
"# Coordinates.\n",
|
|
||||||
" library: dbfs:/mnt/libraries/library.jar\n",
|
|
||||||
"# List of Egg libraries\n",
|
|
||||||
" eggLibraries:\n",
|
|
||||||
" -\n",
|
|
||||||
"# Coordinates.\n",
|
|
||||||
" library: dbfs:/mnt/libraries/library.egg\n",
|
|
||||||
"```\n",
|
|
||||||
"\n",
|
|
||||||
"You can then create a RunConfiguration object using this file and pass it as the runconfig parameter to DatabricksStep.\n",
|
|
||||||
"```python\n",
|
|
||||||
"from azureml.core.runconfig import RunConfiguration\n",
|
|
||||||
"\n",
|
|
||||||
"runconfig = RunConfiguration()\n",
|
|
||||||
"runconfig.load(path='<directory_where_runconfig_is_stored>', name='<runconfig_file_name>')\n",
|
|
||||||
"```"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### 1. Running the demo notebook already added to the Databricks workspace\n",
|
|
||||||
"Create a notebook in the Azure Databricks workspace, and provide the path to that notebook as the value associated with the environment variable \"DATABRICKS_NOTEBOOK_PATH\". This will then set the variable notebook_path when you run the code cell below:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"notebook_path=os.getenv(\"DATABRICKS_NOTEBOOK_PATH\", \"<my-databricks-notebook-path>\") # Databricks notebook path\n",
|
|
||||||
"\n",
|
|
||||||
"dbNbStep = DatabricksStep(\n",
|
|
||||||
" name=\"DBNotebookInWS\",\n",
|
|
||||||
" inputs=[step_1_input],\n",
|
|
||||||
" outputs=[step_1_output],\n",
|
|
||||||
" num_workers=1,\n",
|
|
||||||
" notebook_path=notebook_path,\n",
|
|
||||||
" notebook_params={'myparam': 'testparam'},\n",
|
|
||||||
" run_name='DB_Notebook_demo',\n",
|
|
||||||
" compute_target=databricks_compute,\n",
|
|
||||||
" allow_reuse=True\n",
|
|
||||||
")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Build and submit the Experiment"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#PUBLISHONLY\n",
|
|
||||||
"#steps = [dbNbStep]\n",
|
|
||||||
"#pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
|
||||||
"#pipeline_run = Experiment(ws, 'DB_Notebook_demo').submit(pipeline)\n",
|
|
||||||
"#pipeline_run.wait_for_completion()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### View Run Details"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#PUBLISHONLY\n",
|
|
||||||
"#from azureml.widgets import RunDetails\n",
|
|
||||||
"#RunDetails(pipeline_run).show()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### 2. Running a Python script from DBFS\n",
|
|
||||||
"This shows how to run a Python script in DBFS. \n",
|
|
||||||
"\n",
|
|
||||||
"To complete this, you will need to first upload the Python script in your local machine to DBFS using the [CLI](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html). The CLI command is given below:\n",
|
|
||||||
"\n",
|
|
||||||
"```\n",
|
|
||||||
"dbfs cp ./train-db-dbfs.py dbfs:/train-db-dbfs.py\n",
|
|
||||||
"```\n",
|
|
||||||
"\n",
|
|
||||||
"The code in the below cell assumes that you have completed the previous step of uploading the script `train-db-dbfs.py` to the root folder in DBFS."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"python_script_path = os.getenv(\"DATABRICKS_PYTHON_SCRIPT_PATH\", \"<my-databricks-python-script-path>\") # Databricks python script path\n",
|
|
||||||
"\n",
|
|
||||||
"dbPythonInDbfsStep = DatabricksStep(\n",
|
|
||||||
" name=\"DBPythonInDBFS\",\n",
|
|
||||||
" inputs=[step_1_input],\n",
|
|
||||||
" num_workers=1,\n",
|
|
||||||
" python_script_path=python_script_path,\n",
|
|
||||||
" python_script_params={'--input_data'},\n",
|
|
||||||
" run_name='DB_Python_demo',\n",
|
|
||||||
" compute_target=databricks_compute,\n",
|
|
||||||
" allow_reuse=True\n",
|
|
||||||
")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Build and submit the Experiment"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#PUBLISHONLY\n",
|
|
||||||
"#steps = [dbPythonInDbfsStep]\n",
|
|
||||||
"#pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
|
||||||
"#pipeline_run = Experiment(ws, 'DB_Python_demo').submit(pipeline)\n",
|
|
||||||
"#pipeline_run.wait_for_completion()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### View Run Details"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#PUBLISHONLY\n",
|
|
||||||
"#from azureml.widgets import RunDetails\n",
|
|
||||||
"#RunDetails(pipeline_run).show()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### 3. Running a Python script in Databricks that currenlty is in local computer\n",
|
|
||||||
"To run a Python script that is currently in your local computer, follow the instructions below. \n",
|
|
||||||
"\n",
|
|
||||||
"The commented out code below code assumes that you have `train-db-local.py` in the `scripts` subdirectory under the current working directory.\n",
|
|
||||||
"\n",
|
|
||||||
"In this case, the Python script will be uploaded first to DBFS, and then the script will be run in Databricks."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"python_script_name = \"train-db-local.py\"\n",
|
|
||||||
"source_directory = \".\"\n",
|
|
||||||
"\n",
|
|
||||||
"dbPythonInLocalMachineStep = DatabricksStep(\n",
|
|
||||||
" name=\"DBPythonInLocalMachine\",\n",
|
|
||||||
" inputs=[step_1_input],\n",
|
|
||||||
" num_workers=1,\n",
|
|
||||||
" python_script_name=python_script_name,\n",
|
|
||||||
" source_directory=source_directory,\n",
|
|
||||||
" run_name='DB_Python_Local_demo',\n",
|
|
||||||
" compute_target=databricks_compute,\n",
|
|
||||||
" allow_reuse=True\n",
|
|
||||||
")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Build and submit the Experiment"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"steps = [dbPythonInLocalMachineStep]\n",
|
|
||||||
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
|
||||||
"pipeline_run = Experiment(ws, 'DB_Python_Local_demo').submit(pipeline)\n",
|
|
||||||
"pipeline_run.wait_for_completion()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### View Run Details"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from azureml.widgets import RunDetails\n",
|
|
||||||
"RunDetails(pipeline_run).show()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### 4. Running a JAR job that is alreay added in DBFS\n",
|
|
||||||
"To run a JAR job that is already uploaded to DBFS, follow the instructions below. You will first upload the JAR file to DBFS using the [CLI](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html).\n",
|
|
||||||
"\n",
|
|
||||||
"The commented out code in the below cell assumes that you have uploaded `train-db-dbfs.jar` to the root folder in DBFS. You can upload `train-db-dbfs.jar` to the root folder in DBFS using this commandline so you can use `jar_library_dbfs_path = \"dbfs:/train-db-dbfs.jar\"`:\n",
|
|
||||||
"\n",
|
|
||||||
"```\n",
|
|
||||||
"dbfs cp ./train-db-dbfs.jar dbfs:/train-db-dbfs.jar\n",
|
|
||||||
"```"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"main_jar_class_name = \"com.microsoft.aeva.Main\"\n",
|
|
||||||
"jar_library_dbfs_path = os.getenv(\"DATABRICKS_JAR_LIB_PATH\", \"<my-databricks-jar-lib-path>\") # Databricks jar library path\n",
|
|
||||||
"\n",
|
|
||||||
"dbJarInDbfsStep = DatabricksStep(\n",
|
|
||||||
" name=\"DBJarInDBFS\",\n",
|
|
||||||
" inputs=[step_1_input],\n",
|
|
||||||
" num_workers=1,\n",
|
|
||||||
" main_class_name=main_jar_class_name,\n",
|
|
||||||
" jar_params={'arg1', 'arg2'},\n",
|
|
||||||
" run_name='DB_JAR_demo',\n",
|
|
||||||
" jar_libraries=[JarLibrary(jar_library_dbfs_path)],\n",
|
|
||||||
" compute_target=databricks_compute,\n",
|
|
||||||
" allow_reuse=True\n",
|
|
||||||
")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Build and submit the Experiment"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#PUBLISHONLY\n",
|
|
||||||
"#steps = [dbJarInDbfsStep]\n",
|
|
||||||
"#pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
|
||||||
"#pipeline_run = Experiment(ws, 'DB_JAR_demo').submit(pipeline)\n",
|
|
||||||
"#pipeline_run.wait_for_completion()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### View Run Details"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#PUBLISHONLY\n",
|
|
||||||
"#from azureml.widgets import RunDetails\n",
|
|
||||||
"#RunDetails(pipeline_run).show()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# Next: ADLA as a Compute Target\n",
|
|
||||||
"To use ADLA as a compute target from Azure Machine Learning Pipeline, a AdlaStep is used. This [notebook](./aml-pipelines-use-adla-as-compute-target.ipynb) demonstrates the use of AdlaStep in Azure Machine Learning Pipeline."
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"authors": [
|
|
||||||
{
|
|
||||||
"name": "diray"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3.6",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python36"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.6.2"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 2
|
|
||||||
}
|
|
||||||
@@ -271,11 +271,14 @@
|
|||||||
"from azureml.core import Datastore\n",
|
"from azureml.core import Datastore\n",
|
||||||
"\n",
|
"\n",
|
||||||
"datastore_name = 'demo_training'\n",
|
"datastore_name = 'demo_training'\n",
|
||||||
|
"container_name = 'digits' \n",
|
||||||
|
"account_name = 'automlpublicdatasets'\n",
|
||||||
"Datastore.register_azure_blob_container(\n",
|
"Datastore.register_azure_blob_container(\n",
|
||||||
" workspace = ws, \n",
|
" workspace = ws, \n",
|
||||||
" datastore_name = datastore_name, \n",
|
" datastore_name = datastore_name, \n",
|
||||||
" container_name = 'automl-notebook-data', \n",
|
" container_name = container_name, \n",
|
||||||
" account_name = 'dprepdata'\n",
|
" account_name = account_name,\n",
|
||||||
|
" overwrite = True\n",
|
||||||
")"
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -340,10 +343,10 @@
|
|||||||
"import azureml.dataprep as dprep\n",
|
"import azureml.dataprep as dprep\n",
|
||||||
"from azureml.data.datapath import DataPath\n",
|
"from azureml.data.datapath import DataPath\n",
|
||||||
"\n",
|
"\n",
|
||||||
"datastore = Datastore.get(workspace = ws, name = datastore_name)\n",
|
"datastore = Datastore.get(workspace = ws, datastore_name = datastore_name)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"X_train = dprep.read_csv(DataPath(datastore = datastore, path_on_datastore = 'X.csv')) \n",
|
"X_train = dprep.read_csv(datastore.path('X.csv'))\n",
|
||||||
"y_train = dprep.read_csv(DataPath(datastore = datastore, path_on_datastore = 'y.csv')).to_long(dprep.ColumnSelector(term='.*', use_regex = True))"
|
"y_train = dprep.read_csv(datastore.path('y.csv')).to_long(dprep.ColumnSelector(term='.*', use_regex = True))"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -407,7 +410,7 @@
|
|||||||
" debug_log = 'automl_errors.log',\n",
|
" debug_log = 'automl_errors.log',\n",
|
||||||
" primary_metric = 'AUC_weighted',\n",
|
" primary_metric = 'AUC_weighted',\n",
|
||||||
" iteration_timeout_minutes = 10,\n",
|
" iteration_timeout_minutes = 10,\n",
|
||||||
" iterations = 5,\n",
|
" iterations = 3,\n",
|
||||||
" preprocess = True,\n",
|
" preprocess = True,\n",
|
||||||
" n_cross_validations = 10,\n",
|
" n_cross_validations = 10,\n",
|
||||||
" max_concurrent_iterations = 2, #change it based on number of worker nodes\n",
|
" max_concurrent_iterations = 2, #change it based on number of worker nodes\n",
|
||||||
@@ -433,7 +436,27 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"local_run = experiment.submit(automl_config, show_output = False) # for higher runs please use show_output=False and use the below"
|
"local_run = experiment.submit(automl_config, show_output = True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Continue experiment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"local_run.continue_experiment(iterations=2,\n",
|
||||||
|
" X=X_train, \n",
|
||||||
|
" y=y_train,\n",
|
||||||
|
" spark_context=sc,\n",
|
||||||
|
" show_output=True)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -548,11 +571,11 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from sklearn import datasets\n",
|
"blob_location = \"https://{}.blob.core.windows.net/{}\".format(account_name, container_name)\n",
|
||||||
"digits = datasets.load_digits()\n",
|
"X_test = pd.read_csv(\"{}./X_valid.csv\".format(blob_location), header=0)\n",
|
||||||
"X_test = digits.data[:10, :]\n",
|
"y_test = pd.read_csv(\"{}/y_valid.csv\".format(blob_location), header=0)\n",
|
||||||
"y_test = digits.target[:10]\n",
|
"images = pd.read_csv(\"{}/images.csv\".format(blob_location), header=None)\n",
|
||||||
"images = digits.images[:10]"
|
"images = np.reshape(images.values, (100,8,8))"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -573,9 +596,9 @@
|
|||||||
"for index in np.random.choice(len(y_test), 2, replace = False):\n",
|
"for index in np.random.choice(len(y_test), 2, replace = False):\n",
|
||||||
" print(index)\n",
|
" print(index)\n",
|
||||||
" predicted = fitted_model.predict(X_test[index:index + 1])[0]\n",
|
" predicted = fitted_model.predict(X_test[index:index + 1])[0]\n",
|
||||||
" label = y_test[index]\n",
|
" label = y_test.values[index]\n",
|
||||||
" title = \"Label value = %d Predicted value = %d \" % (label, predicted)\n",
|
" title = \"Label value = %d Predicted value = %d \" % (label, predicted)\n",
|
||||||
" fig = plt.figure(1, figsize = (3,3))\n",
|
" fig = plt.figure(3, figsize = (5,5))\n",
|
||||||
" ax1 = fig.add_axes((0,0,.8,.8))\n",
|
" ax1 = fig.add_axes((0,0,.8,.8))\n",
|
||||||
" ax1.set_title(title)\n",
|
" ax1.set_title(title)\n",
|
||||||
" plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')\n",
|
" plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')\n",
|
||||||
@@ -605,7 +628,7 @@
|
|||||||
"name": "savitam"
|
"name": "savitam"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "wamartin"
|
"name": "sasum"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
|
|||||||
@@ -207,6 +207,7 @@
|
|||||||
"import os\n",
|
"import os\n",
|
||||||
"import random\n",
|
"import random\n",
|
||||||
"import time\n",
|
"import time\n",
|
||||||
|
"import json\n",
|
||||||
"\n",
|
"\n",
|
||||||
"from matplotlib import pyplot as plt\n",
|
"from matplotlib import pyplot as plt\n",
|
||||||
"from matplotlib.pyplot import imshow\n",
|
"from matplotlib.pyplot import imshow\n",
|
||||||
@@ -288,11 +289,14 @@
|
|||||||
"from azureml.core import Datastore\n",
|
"from azureml.core import Datastore\n",
|
||||||
"\n",
|
"\n",
|
||||||
"datastore_name = 'demo_training'\n",
|
"datastore_name = 'demo_training'\n",
|
||||||
|
"container_name = 'digits' \n",
|
||||||
|
"account_name = 'automlpublicdatasets'\n",
|
||||||
"Datastore.register_azure_blob_container(\n",
|
"Datastore.register_azure_blob_container(\n",
|
||||||
" workspace = ws, \n",
|
" workspace = ws, \n",
|
||||||
" datastore_name = datastore_name, \n",
|
" datastore_name = datastore_name, \n",
|
||||||
" container_name = 'automl-notebook-data', \n",
|
" container_name = container_name, \n",
|
||||||
" account_name = 'dprepdata'\n",
|
" account_name = account_name,\n",
|
||||||
|
" overwrite = True\n",
|
||||||
")"
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -357,10 +361,10 @@
|
|||||||
"import azureml.dataprep as dprep\n",
|
"import azureml.dataprep as dprep\n",
|
||||||
"from azureml.data.datapath import DataPath\n",
|
"from azureml.data.datapath import DataPath\n",
|
||||||
"\n",
|
"\n",
|
||||||
"datastore = Datastore.get(workspace = ws, name = datastore_name)\n",
|
"datastore = Datastore.get(workspace = ws, datastore_name = datastore_name)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"X_train = dprep.read_csv(DataPath(datastore = datastore, path_on_datastore = 'X.csv')) \n",
|
"X_train = dprep.read_csv(datastore.path('X.csv'))\n",
|
||||||
"y_train = dprep.read_csv(DataPath(datastore = datastore, path_on_datastore = 'y.csv')).to_long(dprep.ColumnSelector(term='.*', use_regex = True))"
|
"y_train = dprep.read_csv(datastore.path('y.csv')).to_long(dprep.ColumnSelector(term='.*', use_regex = True))"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -424,7 +428,7 @@
|
|||||||
" debug_log = 'automl_errors.log',\n",
|
" debug_log = 'automl_errors.log',\n",
|
||||||
" primary_metric = 'AUC_weighted',\n",
|
" primary_metric = 'AUC_weighted',\n",
|
||||||
" iteration_timeout_minutes = 10,\n",
|
" iteration_timeout_minutes = 10,\n",
|
||||||
" iterations = 30,\n",
|
" iterations = 5,\n",
|
||||||
" preprocess = True,\n",
|
" preprocess = True,\n",
|
||||||
" n_cross_validations = 10,\n",
|
" n_cross_validations = 10,\n",
|
||||||
" max_concurrent_iterations = 2, #change it based on number of worker nodes\n",
|
" max_concurrent_iterations = 2, #change it based on number of worker nodes\n",
|
||||||
@@ -450,7 +454,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"local_run = experiment.submit(automl_config, show_output = False) # for higher runs please use show_output=False and use the below"
|
"local_run = experiment.submit(automl_config, show_output = True)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -588,22 +592,21 @@
|
|||||||
"%%writefile score.py\n",
|
"%%writefile score.py\n",
|
||||||
"import pickle\n",
|
"import pickle\n",
|
||||||
"import json\n",
|
"import json\n",
|
||||||
"import numpy\n",
|
"import numpy as np\n",
|
||||||
"import azureml.train.automl\n",
|
"import azureml.train.automl\n",
|
||||||
"from sklearn.externals import joblib\n",
|
"from sklearn.externals import joblib\n",
|
||||||
"from azureml.core.model import Model\n",
|
"from azureml.core.model import Model\n",
|
||||||
"\n",
|
"import pandas as pd\n",
|
||||||
"\n",
|
"\n",
|
||||||
"def init():\n",
|
"def init():\n",
|
||||||
" global model\n",
|
" global model\n",
|
||||||
" model_path = Model.get_model_path(model_name = '<<modelid>>') # this name is model.id of model that we want to deploy\n",
|
" model_path = Model.get_model_path(model_name = '<<model_id>>') # this name is model.id of model that we want to deploy\n",
|
||||||
" # deserialize the model file back into a sklearn model\n",
|
" # deserialize the model file back into a sklearn model\n",
|
||||||
" model = joblib.load(model_path)\n",
|
" model = joblib.load(model_path)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"def run(rawdata):\n",
|
"def run(raw_data):\n",
|
||||||
" try:\n",
|
" try:\n",
|
||||||
" data = json.loads(rawdata)['data']\n",
|
" data = (pd.DataFrame(np.array(json.loads(raw_data)['data']), columns=[str(i) for i in range(0,64)]))\n",
|
||||||
" data = numpy.array(data)\n",
|
|
||||||
" result = model.predict(data)\n",
|
" result = model.predict(data)\n",
|
||||||
" except Exception as e:\n",
|
" except Exception as e:\n",
|
||||||
" result = str(e)\n",
|
" result = str(e)\n",
|
||||||
@@ -611,6 +614,22 @@
|
|||||||
" return json.dumps({\"result\":result.tolist()})"
|
" return json.dumps({\"result\":result.tolist()})"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"#Replace <<model_id>>\n",
|
||||||
|
"content = \"\"\n",
|
||||||
|
"with open(\"score.py\", \"r\") as fo:\n",
|
||||||
|
" content = fo.read()\n",
|
||||||
|
"\n",
|
||||||
|
"new_content = content.replace(\"<<model_id>>\", local_run.model_id)\n",
|
||||||
|
"with open(\"score.py\", \"w\") as fw:\n",
|
||||||
|
" fw.write(new_content)"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -669,16 +688,19 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"\n",
|
|
||||||
"# this will take 10-15 minutes to finish\n",
|
"# this will take 10-15 minutes to finish\n",
|
||||||
"\n",
|
"\n",
|
||||||
"service_name = \"<<servicename>>\"\n",
|
"import uuid\n",
|
||||||
|
"from azureml.core.image import ContainerImage\n",
|
||||||
|
"\n",
|
||||||
|
"guid = str(uuid.uuid4()).split(\"-\")[0]\n",
|
||||||
|
"service_name = \"myservice-{}\".format(guid)\n",
|
||||||
|
"print(\"Creating service with name: {}\".format(service_name))\n",
|
||||||
"runtime = \"spark-py\" \n",
|
"runtime = \"spark-py\" \n",
|
||||||
"driver_file = \"score.py\"\n",
|
"driver_file = \"score.py\"\n",
|
||||||
"my_conda_file = \"mydeployenv.yml\"\n",
|
"my_conda_file = \"mydeployenv.yml\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# image creation\n",
|
"# image creation\n",
|
||||||
"from azureml.core.image import ContainerImage\n",
|
|
||||||
"myimage_config = ContainerImage.image_configuration(execution_script = driver_file, \n",
|
"myimage_config = ContainerImage.image_configuration(execution_script = driver_file, \n",
|
||||||
" runtime = runtime, \n",
|
" runtime = runtime, \n",
|
||||||
" conda_file = 'mydeployenv.yml')\n",
|
" conda_file = 'mydeployenv.yml')\n",
|
||||||
@@ -720,11 +742,11 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from sklearn import datasets\n",
|
"blob_location = \"https://{}.blob.core.windows.net/{}\".format(account_name, container_name)\n",
|
||||||
"digits = datasets.load_digits()\n",
|
"X_test = pd.read_csv(\"{}./X_valid.csv\".format(blob_location), header=0)\n",
|
||||||
"X_test = digits.data[:10, :]\n",
|
"y_test = pd.read_csv(\"{}/y_valid.csv\".format(blob_location), header=0)\n",
|
||||||
"y_test = digits.target[:10]\n",
|
"images = pd.read_csv(\"{}/images.csv\".format(blob_location), header=None)\n",
|
||||||
"images = digits.images[:10]"
|
"images = np.reshape(images.values, (100,8,8))"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -741,18 +763,39 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
|
"import json\n",
|
||||||
"# Randomly select digits and test.\n",
|
"# Randomly select digits and test.\n",
|
||||||
"for index in np.random.choice(len(y_test), 2, replace = False):\n",
|
"for index in np.random.choice(len(y_test), 2, replace = False):\n",
|
||||||
" print(index)\n",
|
" print(index)\n",
|
||||||
" predicted = fitted_model.predict(X_test[index:index + 1])[0]\n",
|
" test_sample = json.dumps({'data':X_test[index:index + 1].values.tolist()})\n",
|
||||||
" label = y_test[index]\n",
|
" predicted = myservice.run(input_data = test_sample)\n",
|
||||||
" title = \"Label value = %d Predicted value = %d \" % (label, predicted)\n",
|
" label = y_test.values[index]\n",
|
||||||
" fig = plt.figure(1, figsize = (3,3))\n",
|
" predictedDict = json.loads(predicted)\n",
|
||||||
|
" title = \"Label value = %d Predicted value = %s \" % ( label,predictedDict['result'][0]) \n",
|
||||||
|
" fig = plt.figure(3, figsize = (5,5))\n",
|
||||||
" ax1 = fig.add_axes((0,0,.8,.8))\n",
|
" ax1 = fig.add_axes((0,0,.8,.8))\n",
|
||||||
" ax1.set_title(title)\n",
|
" ax1.set_title(title)\n",
|
||||||
" plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')\n",
|
" plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')\n",
|
||||||
" display(fig)"
|
" display(fig)"
|
||||||
]
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"### Delete the service"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"myservice.delete()"
|
||||||
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
@@ -761,7 +804,7 @@
|
|||||||
"name": "savitam"
|
"name": "savitam"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "wamartin"
|
"name": "sasum"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
|
|||||||
@@ -0,0 +1,16 @@
|
|||||||
|
# Using Databricks as a Compute Target from Azure Machine Learning Pipeline
|
||||||
|
To use Databricks as a compute target from Azure Machine Learning Pipeline, a DatabricksStep is used. This notebook demonstrates the use of DatabricksStep in Azure Machine Learning Pipeline.
|
||||||
|
|
||||||
|
The notebook will show:
|
||||||
|
|
||||||
|
1. Running an arbitrary Databricks notebook that the customer has in Databricks workspace
|
||||||
|
2. Running an arbitrary Python script that the customer has in DBFS
|
||||||
|
3. Running an arbitrary Python script that is available on local computer (will upload to DBFS, and then run in Databricks)
|
||||||
|
4. Running a JAR job that the customer has in DBFS.
|
||||||
|
|
||||||
|
## Before you begin:
|
||||||
|
1. **Create an Azure Databricks workspace** in the same subscription where you have your Azure Machine Learning workspace.
|
||||||
|
You will need details of this workspace later on to define DatabricksStep. [More information](https://ms.portal.azure.com/#blade/HubsExtension/Resources/resourceType/Microsoft.Databricks%2Fworkspaces).
|
||||||
|
2. **Create PAT (access token)** at the Azure Databricks portal. [More information](https://docs.databricks.com/api/latest/authentication.html#generate-a-token).
|
||||||
|
3. **Add demo notebook to ADB** This notebook has a sample you can use as is. Launch Azure Databricks attached to your Azure Machine Learning workspace and add a new notebook.
|
||||||
|
4. **Create/attach a Blob storage** for use from ADB
|
||||||
@@ -0,0 +1,708 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Using Databricks as a Compute Target from Azure Machine Learning Pipeline\n",
|
||||||
|
"To use Databricks as a compute target from [Azure Machine Learning Pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines), a [DatabricksStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.databricks_step.databricksstep?view=azure-ml-py) is used. This notebook demonstrates the use of DatabricksStep in Azure Machine Learning Pipeline.\n",
|
||||||
|
"\n",
|
||||||
|
"The notebook will show:\n",
|
||||||
|
"1. Running an arbitrary Databricks notebook that the customer has in Databricks workspace\n",
|
||||||
|
"2. Running an arbitrary Python script that the customer has in DBFS\n",
|
||||||
|
"3. Running an arbitrary Python script that is available on local computer (will upload to DBFS, and then run in Databricks) \n",
|
||||||
|
"4. Running a JAR job that the customer has in DBFS.\n",
|
||||||
|
"\n",
|
||||||
|
"## Before you begin:\n",
|
||||||
|
"\n",
|
||||||
|
"1. **Create an Azure Databricks workspace** in the same subscription where you have your Azure Machine Learning workspace. You will need details of this workspace later on to define DatabricksStep. [Click here](https://ms.portal.azure.com/#blade/HubsExtension/Resources/resourceType/Microsoft.Databricks%2Fworkspaces) for more information.\n",
|
||||||
|
"2. **Create PAT (access token)**: Manually create a Databricks access token at the Azure Databricks portal. See [this](https://docs.databricks.com/api/latest/authentication.html#generate-a-token) for more information.\n",
|
||||||
|
"3. **Add demo notebook to ADB**: This notebook has a sample you can use as is. Launch Azure Databricks attached to your Azure Machine Learning workspace and add a new notebook. \n",
|
||||||
|
"4. **Create/attach a Blob storage** for use from ADB"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Add demo notebook to ADB Workspace\n",
|
||||||
|
"Copy and paste the below code to create a new notebook in your ADB workspace."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"```python\n",
|
||||||
|
"# direct access\n",
|
||||||
|
"dbutils.widgets.get(\"myparam\")\n",
|
||||||
|
"p = getArgument(\"myparam\")\n",
|
||||||
|
"print (\"Param -\\'myparam':\")\n",
|
||||||
|
"print (p)\n",
|
||||||
|
"\n",
|
||||||
|
"dbutils.widgets.get(\"input\")\n",
|
||||||
|
"i = getArgument(\"input\")\n",
|
||||||
|
"print (\"Param -\\'input':\")\n",
|
||||||
|
"print (i)\n",
|
||||||
|
"\n",
|
||||||
|
"dbutils.widgets.get(\"output\")\n",
|
||||||
|
"o = getArgument(\"output\")\n",
|
||||||
|
"print (\"Param -\\'output':\")\n",
|
||||||
|
"print (o)\n",
|
||||||
|
"\n",
|
||||||
|
"n = i + \"/testdata.txt\"\n",
|
||||||
|
"df = spark.read.csv(n)\n",
|
||||||
|
"\n",
|
||||||
|
"display (df)\n",
|
||||||
|
"\n",
|
||||||
|
"data = [('value1', 'value2')]\n",
|
||||||
|
"df2 = spark.createDataFrame(data)\n",
|
||||||
|
"\n",
|
||||||
|
"z = o + \"/output.txt\"\n",
|
||||||
|
"df2.write.csv(z)\n",
|
||||||
|
"```"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Azure Machine Learning and Pipeline SDK-specific imports"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os\n",
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core.runconfig import JarLibrary\n",
|
||||||
|
"from azureml.core.compute import ComputeTarget, DatabricksCompute\n",
|
||||||
|
"from azureml.exceptions import ComputeTargetException\n",
|
||||||
|
"from azureml.core import Workspace, Experiment\n",
|
||||||
|
"from azureml.pipeline.core import Pipeline, PipelineData\n",
|
||||||
|
"from azureml.pipeline.steps import DatabricksStep\n",
|
||||||
|
"from azureml.core.datastore import Datastore\n",
|
||||||
|
"from azureml.data.data_reference import DataReference\n",
|
||||||
|
"\n",
|
||||||
|
"# Check core SDK version number\n",
|
||||||
|
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Initialize Workspace\n",
|
||||||
|
"\n",
|
||||||
|
"Initialize a workspace object from persisted configuration. Make sure the config file is present at .\\config.json"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Attach Databricks compute target\n",
|
||||||
|
"Next, you need to add your Databricks workspace to Azure Machine Learning as a compute target and give it a name. You will use this name to refer to your Databricks workspace compute target inside Azure Machine Learning.\n",
|
||||||
|
"\n",
|
||||||
|
"- **Resource Group** - The resource group name of your Azure Machine Learning workspace\n",
|
||||||
|
"- **Databricks Workspace Name** - The workspace name of your Azure Databricks workspace\n",
|
||||||
|
"- **Databricks Access Token** - The access token you created in ADB\n",
|
||||||
|
"\n",
|
||||||
|
"**The Databricks workspace need to be present in the same subscription as your AML workspace**"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Replace with your account info before running.\n",
|
||||||
|
" \n",
|
||||||
|
"db_compute_name=os.getenv(\"DATABRICKS_COMPUTE_NAME\", \"<my-databricks-compute-name>\") # Databricks compute name\n",
|
||||||
|
"db_resource_group=os.getenv(\"DATABRICKS_RESOURCE_GROUP\", \"<my-db-resource-group>\") # Databricks resource group\n",
|
||||||
|
"db_workspace_name=os.getenv(\"DATABRICKS_WORKSPACE_NAME\", \"<my-db-workspace-name>\") # Databricks workspace name\n",
|
||||||
|
"db_access_token=os.getenv(\"DATABRICKS_ACCESS_TOKEN\", \"<my-access-token>\") # Databricks access token\n",
|
||||||
|
" \n",
|
||||||
|
"try:\n",
|
||||||
|
" databricks_compute = DatabricksCompute(workspace=ws, name=db_compute_name)\n",
|
||||||
|
" print('Compute target {} already exists'.format(db_compute_name))\n",
|
||||||
|
"except ComputeTargetException:\n",
|
||||||
|
" print('Compute not found, will use below parameters to attach new one')\n",
|
||||||
|
" print('db_compute_name {}'.format(db_compute_name))\n",
|
||||||
|
" print('db_resource_group {}'.format(db_resource_group))\n",
|
||||||
|
" print('db_workspace_name {}'.format(db_workspace_name))\n",
|
||||||
|
" print('db_access_token {}'.format(db_access_token))\n",
|
||||||
|
" \n",
|
||||||
|
" config = DatabricksCompute.attach_configuration(\n",
|
||||||
|
" resource_group = db_resource_group,\n",
|
||||||
|
" workspace_name = db_workspace_name,\n",
|
||||||
|
" access_token= db_access_token)\n",
|
||||||
|
" databricks_compute=ComputeTarget.attach(ws, db_compute_name, config)\n",
|
||||||
|
" databricks_compute.wait_for_completion(True)\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Data Connections with Inputs and Outputs\n",
|
||||||
|
"The DatabricksStep supports Azure Bloband ADLS for inputs and outputs. You also will need to define a [Secrets](https://docs.azuredatabricks.net/user-guide/secrets/index.html) scope to enable authentication to external data sources such as Blob and ADLS from Databricks.\n",
|
||||||
|
"\n",
|
||||||
|
"- Databricks documentation on [Azure Blob](https://docs.azuredatabricks.net/spark/latest/data-sources/azure/azure-storage.html)\n",
|
||||||
|
"- Databricks documentation on [ADLS](https://docs.databricks.com/spark/latest/data-sources/azure/azure-datalake.html)\n",
|
||||||
|
"\n",
|
||||||
|
"### Type of Data Access\n",
|
||||||
|
"Databricks allows to interact with Azure Blob and ADLS in two ways.\n",
|
||||||
|
"- **Direct Access**: Databricks allows you to interact with Azure Blob or ADLS URIs directly. The input or output URIs will be mapped to a Databricks widget param in the Databricks notebook.\n",
|
||||||
|
"- **Mounting**: You will be supplied with additional parameters and secrets that will enable you to mount your ADLS or Azure Blob input or output location in your Databricks notebook."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Direct Access: Python sample code\n",
|
||||||
|
"If you have a data reference named \"input\" it will represent the URI of the input and you can access it directly in the Databricks python notebook like so:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"```python\n",
|
||||||
|
"dbutils.widgets.get(\"input\")\n",
|
||||||
|
"y = getArgument(\"input\")\n",
|
||||||
|
"df = spark.read.csv(y)\n",
|
||||||
|
"```"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Mounting: Python sample code for Azure Blob\n",
|
||||||
|
"Given an Azure Blob data reference named \"input\" the following widget params will be made available in the Databricks notebook:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"```python\n",
|
||||||
|
"# This contains the input URI\n",
|
||||||
|
"dbutils.widgets.get(\"input\")\n",
|
||||||
|
"myinput_uri = getArgument(\"input\")\n",
|
||||||
|
"\n",
|
||||||
|
"# How to get the input datastore name inside ADB notebook\n",
|
||||||
|
"# This contains the name of a Databricks secret (in the predefined \"amlscope\" secret scope) \n",
|
||||||
|
"# that contians an access key or sas for the Azure Blob input (this name is obtained by appending \n",
|
||||||
|
"# the name of the input with \"_blob_secretname\". \n",
|
||||||
|
"dbutils.widgets.get(\"input_blob_secretname\") \n",
|
||||||
|
"myinput_blob_secretname = getArgument(\"input_blob_secretname\")\n",
|
||||||
|
"\n",
|
||||||
|
"# This contains the required configuration for mounting\n",
|
||||||
|
"dbutils.widgets.get(\"input_blob_config\")\n",
|
||||||
|
"myinput_blob_config = getArgument(\"input_blob_config\")\n",
|
||||||
|
"\n",
|
||||||
|
"# Usage\n",
|
||||||
|
"dbutils.fs.mount(\n",
|
||||||
|
" source = myinput_uri,\n",
|
||||||
|
" mount_point = \"/mnt/input\",\n",
|
||||||
|
" extra_configs = {myinput_blob_config:dbutils.secrets.get(scope = \"amlscope\", key = myinput_blob_secretname)})\n",
|
||||||
|
"```"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Mounting: Python sample code for ADLS\n",
|
||||||
|
"Given an ADLS data reference named \"input\" the following widget params will be made available in the Databricks notebook:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"```python\n",
|
||||||
|
"# This contains the input URI\n",
|
||||||
|
"dbutils.widgets.get(\"input\") \n",
|
||||||
|
"myinput_uri = getArgument(\"input\")\n",
|
||||||
|
"\n",
|
||||||
|
"# This contains the client id for the service principal \n",
|
||||||
|
"# that has access to the adls input\n",
|
||||||
|
"dbutils.widgets.get(\"input_adls_clientid\") \n",
|
||||||
|
"myinput_adls_clientid = getArgument(\"input_adls_clientid\")\n",
|
||||||
|
"\n",
|
||||||
|
"# This contains the name of a Databricks secret (in the predefined \"amlscope\" secret scope) \n",
|
||||||
|
"# that contains the secret for the above mentioned service principal\n",
|
||||||
|
"dbutils.widgets.get(\"input_adls_secretname\") \n",
|
||||||
|
"myinput_adls_secretname = getArgument(\"input_adls_secretname\")\n",
|
||||||
|
"\n",
|
||||||
|
"# This contains the refresh url for the mounting configs\n",
|
||||||
|
"dbutils.widgets.get(\"input_adls_refresh_url\") \n",
|
||||||
|
"myinput_adls_refresh_url = getArgument(\"input_adls_refresh_url\")\n",
|
||||||
|
"\n",
|
||||||
|
"# Usage \n",
|
||||||
|
"configs = {\"dfs.adls.oauth2.access.token.provider.type\": \"ClientCredential\",\n",
|
||||||
|
" \"dfs.adls.oauth2.client.id\": myinput_adls_clientid,\n",
|
||||||
|
" \"dfs.adls.oauth2.credential\": dbutils.secrets.get(scope = \"amlscope\", key =myinput_adls_secretname),\n",
|
||||||
|
" \"dfs.adls.oauth2.refresh.url\": myinput_adls_refresh_url}\n",
|
||||||
|
"\n",
|
||||||
|
"dbutils.fs.mount(\n",
|
||||||
|
" source = myinput_uri,\n",
|
||||||
|
" mount_point = \"/mnt/output\",\n",
|
||||||
|
" extra_configs = configs)\n",
|
||||||
|
"```"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Use Databricks from Azure Machine Learning Pipeline\n",
|
||||||
|
"To use Databricks as a compute target from Azure Machine Learning Pipeline, a DatabricksStep is used. Let's define a datasource (via DataReference) and intermediate data (via PipelineData) to be used in DatabricksStep."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Use the default blob storage\n",
|
||||||
|
"def_blob_store = Datastore(ws, \"workspaceblobstore\")\n",
|
||||||
|
"print('Datastore {} will be used'.format(def_blob_store.name))\n",
|
||||||
|
"\n",
|
||||||
|
"# We are uploading a sample file in the local directory to be used as a datasource\n",
|
||||||
|
"def_blob_store.upload_files(files=[\"./testdata.txt\"], target_path=\"dbtest\", overwrite=False)\n",
|
||||||
|
"\n",
|
||||||
|
"step_1_input = DataReference(datastore=def_blob_store, path_on_datastore=\"dbtest\",\n",
|
||||||
|
" data_reference_name=\"input\")\n",
|
||||||
|
"\n",
|
||||||
|
"step_1_output = PipelineData(\"output\", datastore=def_blob_store)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Add a DatabricksStep\n",
|
||||||
|
"Adds a Databricks notebook as a step in a Pipeline.\n",
|
||||||
|
"- ***name:** Name of the Module\n",
|
||||||
|
"- **inputs:** List of input connections for data consumed by this step. Fetch this inside the notebook using dbutils.widgets.get(\"input\")\n",
|
||||||
|
"- **outputs:** List of output port definitions for outputs produced by this step. Fetch this inside the notebook using dbutils.widgets.get(\"output\")\n",
|
||||||
|
"- **existing_cluster_id:** Cluster ID of an existing Interactive cluster on the Databricks workspace. If you are providing this, do not provide any of the parameters below that are used to create a new cluster such as spark_version, node_type, etc.\n",
|
||||||
|
"- **spark_version:** Version of spark for the databricks run cluster. default value: 4.0.x-scala2.11\n",
|
||||||
|
"- **node_type:** Azure vm node types for the databricks run cluster. default value: Standard_D3_v2\n",
|
||||||
|
"- **num_workers:** Specifies a static number of workers for the databricks run cluster\n",
|
||||||
|
"- **min_workers:** Specifies a min number of workers to use for auto-scaling the databricks run cluster\n",
|
||||||
|
"- **max_workers:** Specifies a max number of workers to use for auto-scaling the databricks run cluster\n",
|
||||||
|
"- **spark_env_variables:** Spark environment variables for the databricks run cluster (dictionary of {str:str}). default value: {'PYSPARK_PYTHON': '/databricks/python3/bin/python3'}\n",
|
||||||
|
"- **notebook_path:** Path to the notebook in the databricks instance. If you are providing this, do not provide python script related paramaters or JAR related parameters.\n",
|
||||||
|
"- **notebook_params:** Parameters for the databricks notebook (dictionary of {str:str}). Fetch this inside the notebook using dbutils.widgets.get(\"myparam\")\n",
|
||||||
|
"- **python_script_path:** The path to the python script in the DBFS or S3. If you are providing this, do not provide python_script_name which is used for uploading script from local machine.\n",
|
||||||
|
"- **python_script_params:** Parameters for the python script (list of str)\n",
|
||||||
|
"- **main_class_name:** The name of the entry point in a JAR module. If you are providing this, do not provide any python script or notebook related parameters.\n",
|
||||||
|
"- **jar_params:** Parameters for the JAR module (list of str)\n",
|
||||||
|
"- **python_script_name:** name of a python script on your local machine (relative to source_directory). If you are providing this do not provide python_script_path which is used to execute a remote python script; or any of the JAR or notebook related parameters.\n",
|
||||||
|
"- **source_directory:** folder that contains the script and other files\n",
|
||||||
|
"- **hash_paths:** list of paths to hash to detect a change in source_directory (script file is always hashed)\n",
|
||||||
|
"- **run_name:** Name in databricks for this run\n",
|
||||||
|
"- **timeout_seconds:** Timeout for the databricks run\n",
|
||||||
|
"- **runconfig:** Runconfig to use. Either pass runconfig or each library type as a separate parameter but do not mix the two\n",
|
||||||
|
"- **maven_libraries:** maven libraries for the databricks run\n",
|
||||||
|
"- **pypi_libraries:** pypi libraries for the databricks run\n",
|
||||||
|
"- **egg_libraries:** egg libraries for the databricks run\n",
|
||||||
|
"- **jar_libraries:** jar libraries for the databricks run\n",
|
||||||
|
"- **rcran_libraries:** rcran libraries for the databricks run\n",
|
||||||
|
"- **compute_target:** Azure Databricks compute\n",
|
||||||
|
"- **allow_reuse:** Whether the step should reuse previous results when run with the same settings/inputs\n",
|
||||||
|
"- **version:** Optional version tag to denote a change in functionality for the step\n",
|
||||||
|
"\n",
|
||||||
|
"\\* *denotes required fields* \n",
|
||||||
|
"*You must provide exactly one of num_workers or min_workers and max_workers paramaters* \n",
|
||||||
|
"*You must provide exactly one of databricks_compute or databricks_compute_name parameters*\n",
|
||||||
|
"\n",
|
||||||
|
"## Use runconfig to specify library dependencies\n",
|
||||||
|
"You can use a runconfig to specify the library dependencies for your cluster in Databricks. The runconfig will contain a databricks section as follows:\n",
|
||||||
|
"\n",
|
||||||
|
"```yaml\n",
|
||||||
|
"environment:\n",
|
||||||
|
"# Databricks details\n",
|
||||||
|
" databricks:\n",
|
||||||
|
"# List of maven libraries.\n",
|
||||||
|
" mavenLibraries:\n",
|
||||||
|
" - coordinates: org.jsoup:jsoup:1.7.1\n",
|
||||||
|
" repo: ''\n",
|
||||||
|
" exclusions:\n",
|
||||||
|
" - slf4j:slf4j\n",
|
||||||
|
" - '*:hadoop-client'\n",
|
||||||
|
"# List of PyPi libraries\n",
|
||||||
|
" pypiLibraries:\n",
|
||||||
|
" - package: beautifulsoup4\n",
|
||||||
|
" repo: ''\n",
|
||||||
|
"# List of RCran libraries\n",
|
||||||
|
" rcranLibraries:\n",
|
||||||
|
" -\n",
|
||||||
|
"# Coordinates.\n",
|
||||||
|
" package: ada\n",
|
||||||
|
"# Repo\n",
|
||||||
|
" repo: http://cran.us.r-project.org\n",
|
||||||
|
"# List of JAR libraries\n",
|
||||||
|
" jarLibraries:\n",
|
||||||
|
" -\n",
|
||||||
|
"# Coordinates.\n",
|
||||||
|
" library: dbfs:/mnt/libraries/library.jar\n",
|
||||||
|
"# List of Egg libraries\n",
|
||||||
|
" eggLibraries:\n",
|
||||||
|
" -\n",
|
||||||
|
"# Coordinates.\n",
|
||||||
|
" library: dbfs:/mnt/libraries/library.egg\n",
|
||||||
|
"```\n",
|
||||||
|
"\n",
|
||||||
|
"You can then create a RunConfiguration object using this file and pass it as the runconfig parameter to DatabricksStep.\n",
|
||||||
|
"```python\n",
|
||||||
|
"from azureml.core.runconfig import RunConfiguration\n",
|
||||||
|
"\n",
|
||||||
|
"runconfig = RunConfiguration()\n",
|
||||||
|
"runconfig.load(path='<directory_where_runconfig_is_stored>', name='<runconfig_file_name>')\n",
|
||||||
|
"```"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### 1. Running the demo notebook already added to the Databricks workspace\n",
|
||||||
|
"Create a notebook in the Azure Databricks workspace, and provide the path to that notebook as the value associated with the environment variable \"DATABRICKS_NOTEBOOK_PATH\". This will then set the variable\u00c2\u00a0notebook_path\u00c2\u00a0when you run the code cell below:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"notebook_path=os.getenv(\"DATABRICKS_NOTEBOOK_PATH\", \"<my-databricks-notebook-path>\") # Databricks notebook path\n",
|
||||||
|
"\n",
|
||||||
|
"dbNbStep = DatabricksStep(\n",
|
||||||
|
" name=\"DBNotebookInWS\",\n",
|
||||||
|
" inputs=[step_1_input],\n",
|
||||||
|
" outputs=[step_1_output],\n",
|
||||||
|
" num_workers=1,\n",
|
||||||
|
" notebook_path=notebook_path,\n",
|
||||||
|
" notebook_params={'myparam': 'testparam'},\n",
|
||||||
|
" run_name='DB_Notebook_demo',\n",
|
||||||
|
" compute_target=databricks_compute,\n",
|
||||||
|
" allow_reuse=True\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Build and submit the Experiment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"steps = [dbNbStep]\n",
|
||||||
|
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
||||||
|
"pipeline_run = Experiment(ws, 'DB_Notebook_demo').submit(pipeline)\n",
|
||||||
|
"pipeline_run.wait_for_completion()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### View Run Details"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.widgets import RunDetails\n",
|
||||||
|
"RunDetails(pipeline_run).show()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### 2. Running a Python script from DBFS\n",
|
||||||
|
"This shows how to run a Python script in DBFS. \n",
|
||||||
|
"\n",
|
||||||
|
"To complete this, you will need to first upload the Python script in your local machine to DBFS using the [CLI](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html). The CLI command is given below:\n",
|
||||||
|
"\n",
|
||||||
|
"```\n",
|
||||||
|
"dbfs cp ./train-db-dbfs.py dbfs:/train-db-dbfs.py\n",
|
||||||
|
"```\n",
|
||||||
|
"\n",
|
||||||
|
"The code in the below cell assumes that you have completed the previous step of uploading the script `train-db-dbfs.py` to the root folder in DBFS."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"python_script_path = os.getenv(\"DATABRICKS_PYTHON_SCRIPT_PATH\", \"<my-databricks-python-script-path>\") # Databricks python script path\n",
|
||||||
|
"\n",
|
||||||
|
"dbPythonInDbfsStep = DatabricksStep(\n",
|
||||||
|
" name=\"DBPythonInDBFS\",\n",
|
||||||
|
" inputs=[step_1_input],\n",
|
||||||
|
" num_workers=1,\n",
|
||||||
|
" python_script_path=python_script_path,\n",
|
||||||
|
" python_script_params={'--input_data'},\n",
|
||||||
|
" run_name='DB_Python_demo',\n",
|
||||||
|
" compute_target=databricks_compute,\n",
|
||||||
|
" allow_reuse=True\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Build and submit the Experiment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"steps = [dbPythonInDbfsStep]\n",
|
||||||
|
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
||||||
|
"pipeline_run = Experiment(ws, 'DB_Python_demo').submit(pipeline)\n",
|
||||||
|
"pipeline_run.wait_for_completion()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### View Run Details"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.widgets import RunDetails\n",
|
||||||
|
"RunDetails(pipeline_run).show()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### 3. Running a Python script in Databricks that currenlty is in local computer\n",
|
||||||
|
"To run a Python script that is currently in your local computer, follow the instructions below. \n",
|
||||||
|
"\n",
|
||||||
|
"The commented out code below code assumes that you have `train-db-local.py` in the `scripts` subdirectory under the current working directory.\n",
|
||||||
|
"\n",
|
||||||
|
"In this case, the Python script will be uploaded first to DBFS, and then the script will be run in Databricks."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"python_script_name = \"train-db-local.py\"\n",
|
||||||
|
"source_directory = \".\"\n",
|
||||||
|
"\n",
|
||||||
|
"dbPythonInLocalMachineStep = DatabricksStep(\n",
|
||||||
|
" name=\"DBPythonInLocalMachine\",\n",
|
||||||
|
" inputs=[step_1_input],\n",
|
||||||
|
" num_workers=1,\n",
|
||||||
|
" python_script_name=python_script_name,\n",
|
||||||
|
" source_directory=source_directory,\n",
|
||||||
|
" run_name='DB_Python_Local_demo',\n",
|
||||||
|
" compute_target=databricks_compute,\n",
|
||||||
|
" allow_reuse=True\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Build and submit the Experiment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"steps = [dbPythonInLocalMachineStep]\n",
|
||||||
|
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
||||||
|
"pipeline_run = Experiment(ws, 'DB_Python_Local_demo').submit(pipeline)\n",
|
||||||
|
"pipeline_run.wait_for_completion()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### View Run Details"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.widgets import RunDetails\n",
|
||||||
|
"RunDetails(pipeline_run).show()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### 4. Running a JAR job that is alreay added in DBFS\n",
|
||||||
|
"To run a JAR job that is already uploaded to DBFS, follow the instructions below. You will first upload the JAR file to DBFS using the [CLI](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html).\n",
|
||||||
|
"\n",
|
||||||
|
"The commented out code in the below cell assumes that you have uploaded `train-db-dbfs.jar` to the root folder in DBFS. You can upload `train-db-dbfs.jar` to the root folder in DBFS using this commandline so you can use `jar_library_dbfs_path = \"dbfs:/train-db-dbfs.jar\"`:\n",
|
||||||
|
"\n",
|
||||||
|
"```\n",
|
||||||
|
"dbfs cp ./train-db-dbfs.jar dbfs:/train-db-dbfs.jar\n",
|
||||||
|
"```"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"main_jar_class_name = \"com.microsoft.aeva.Main\"\n",
|
||||||
|
"jar_library_dbfs_path = os.getenv(\"DATABRICKS_JAR_LIB_PATH\", \"<my-databricks-jar-lib-path>\") # Databricks jar library path\n",
|
||||||
|
"\n",
|
||||||
|
"dbJarInDbfsStep = DatabricksStep(\n",
|
||||||
|
" name=\"DBJarInDBFS\",\n",
|
||||||
|
" inputs=[step_1_input],\n",
|
||||||
|
" num_workers=1,\n",
|
||||||
|
" main_class_name=main_jar_class_name,\n",
|
||||||
|
" jar_params={'arg1', 'arg2'},\n",
|
||||||
|
" run_name='DB_JAR_demo',\n",
|
||||||
|
" jar_libraries=[JarLibrary(jar_library_dbfs_path)],\n",
|
||||||
|
" compute_target=databricks_compute,\n",
|
||||||
|
" allow_reuse=True\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Build and submit the Experiment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"steps = [dbJarInDbfsStep]\n",
|
||||||
|
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
||||||
|
"pipeline_run = Experiment(ws, 'DB_JAR_demo').submit(pipeline)\n",
|
||||||
|
"pipeline_run.wait_for_completion()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### View Run Details"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.widgets import RunDetails\n",
|
||||||
|
"RunDetails(pipeline_run).show()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Next: ADLA as a Compute Target\n",
|
||||||
|
"To use ADLA as a compute target from Azure Machine Learning Pipeline, a AdlaStep is used. This [notebook](./aml-pipelines-use-adla-as-compute-target.ipynb) demonstrates the use of AdlaStep in Azure Machine Learning Pipeline."
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "diray"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.6",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python36"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.6.2"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
|
}
|
||||||
@@ -263,7 +263,7 @@
|
|||||||
" prediction = aci_service.run(input_data=test_sample)\n",
|
" prediction = aci_service.run(input_data=test_sample)\n",
|
||||||
" print(prediction)\n",
|
" print(prediction)\n",
|
||||||
"else:\n",
|
"else:\n",
|
||||||
" raise ValueError(\"Service deployment isn't healthy, can't call the service\")"
|
" raise ValueError(\"Service deployment isn't healthy, can't call the service. Error: \", aci_service.error)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -381,7 +381,7 @@
|
|||||||
" aks_service.wait_for_deployment(show_output = True)\n",
|
" aks_service.wait_for_deployment(show_output = True)\n",
|
||||||
" print(aks_service.state)\n",
|
" print(aks_service.state)\n",
|
||||||
"else:\n",
|
"else:\n",
|
||||||
" raise ValueError(\"AKS provisioning failed.\")"
|
" raise ValueError(\"AKS provisioning failed. Error: \", aks_service.error)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -409,7 +409,7 @@
|
|||||||
" prediction = aks_service.run(input_data=test_sample)\n",
|
" prediction = aks_service.run(input_data=test_sample)\n",
|
||||||
" print(prediction)\n",
|
" print(prediction)\n",
|
||||||
"else:\n",
|
"else:\n",
|
||||||
" raise ValueError(\"Service deployment isn't healthy, can't call the service\")"
|
" raise ValueError(\"Service deployment isn't healthy, can't call the service. Error: \", aks_service.error)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -465,7 +465,7 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"authors": [
|
"authors": [
|
||||||
{
|
{
|
||||||
"name": "jocier"
|
"name": "shipatel"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
|
|||||||
@@ -329,7 +329,7 @@
|
|||||||
" aks_service.wait_for_deployment(show_output = True)\n",
|
" aks_service.wait_for_deployment(show_output = True)\n",
|
||||||
" print(aks_service.state)\n",
|
" print(aks_service.state)\n",
|
||||||
"else: \n",
|
"else: \n",
|
||||||
" raise ValueError(\"aks provisioning failed, can't deploy service\")"
|
" raise ValueError(\"aks provisioning failed, can't deploy service. Error: \", aks_service.error)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -362,7 +362,7 @@
|
|||||||
" prediction = aks_service.run(input_data=test_sample)\n",
|
" prediction = aks_service.run(input_data=test_sample)\n",
|
||||||
" print(prediction)\n",
|
" print(prediction)\n",
|
||||||
"else:\n",
|
"else:\n",
|
||||||
" raise ValueError(\"Service deployment isn't healthy, can't call the service\")"
|
" raise ValueError(\"Service deployment isn't healthy, can't call the service. Error: \", aks_service.error)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -445,7 +445,7 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"authors": [
|
"authors": [
|
||||||
{
|
{
|
||||||
"name": "jocier"
|
"name": "shipatel"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
|
|||||||
2
how-to-use-azureml/deployment/onnx/Dockerfile
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
RUN apt-get update
|
||||||
|
RUN apt-get install -y libgomp1
|
||||||
@@ -272,6 +272,7 @@
|
|||||||
"image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n",
|
"image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n",
|
||||||
" runtime = \"python\",\n",
|
" runtime = \"python\",\n",
|
||||||
" conda_file = \"myenv.yml\",\n",
|
" conda_file = \"myenv.yml\",\n",
|
||||||
|
" docker_file = \"Dockerfile\",\n",
|
||||||
" description = \"TinyYOLO ONNX Demo\",\n",
|
" description = \"TinyYOLO ONNX Demo\",\n",
|
||||||
" tags = {\"demo\": \"onnx\"}\n",
|
" tags = {\"demo\": \"onnx\"}\n",
|
||||||
" )\n",
|
" )\n",
|
||||||
|
|||||||
@@ -350,6 +350,7 @@
|
|||||||
"image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n",
|
"image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n",
|
||||||
" runtime = \"python\",\n",
|
" runtime = \"python\",\n",
|
||||||
" conda_file = \"myenv.yml\",\n",
|
" conda_file = \"myenv.yml\",\n",
|
||||||
|
" docker_file = \"Dockerfile\",\n",
|
||||||
" description = \"Emotion ONNX Runtime container\",\n",
|
" description = \"Emotion ONNX Runtime container\",\n",
|
||||||
" tags = {\"demo\": \"onnx\"})\n",
|
" tags = {\"demo\": \"onnx\"})\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -772,7 +773,7 @@
|
|||||||
"- ensured that your deep learning model is working perfectly (in the cloud) on test data, and checked it against some of your own!\n",
|
"- ensured that your deep learning model is working perfectly (in the cloud) on test data, and checked it against some of your own!\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Next steps:\n",
|
"Next steps:\n",
|
||||||
"- If you have not already, check out another interesting ONNX/AML application that lets you set up a state-of-the-art [handwritten image classification model (MNIST)](https://github.com/Azure/MachineLearningNotebooks/tree/master/onnx/onnx-inference-mnist.ipynb) in the cloud! This tutorial deploys a pre-trained ONNX Computer Vision model for handwritten digit classification in an Azure ML virtual machine.\n",
|
"- If you have not already, check out another interesting ONNX/AML application that lets you set up a state-of-the-art [handwritten image classification model (MNIST)](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.ipynb) in the cloud! This tutorial deploys a pre-trained ONNX Computer Vision model for handwritten digit classification in an Azure ML virtual machine.\n",
|
||||||
"- Keep an eye out for an updated version of this tutorial that uses ONNX Runtime GPU.\n",
|
"- Keep an eye out for an updated version of this tutorial that uses ONNX Runtime GPU.\n",
|
||||||
"- Contribute to our [open source ONNX repository on github](http://github.com/onnx/onnx) and/or add to our [ONNX model zoo](http://github.com/onnx/models)"
|
"- Contribute to our [open source ONNX repository on github](http://github.com/onnx/onnx) and/or add to our [ONNX model zoo](http://github.com/onnx/models)"
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -333,6 +333,7 @@
|
|||||||
"image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n",
|
"image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n",
|
||||||
" runtime = \"python\",\n",
|
" runtime = \"python\",\n",
|
||||||
" conda_file = \"myenv.yml\",\n",
|
" conda_file = \"myenv.yml\",\n",
|
||||||
|
" docker_file = \"Dockerfile\",\n",
|
||||||
" description = \"MNIST ONNX Runtime container\",\n",
|
" description = \"MNIST ONNX Runtime container\",\n",
|
||||||
" tags = {\"demo\": \"onnx\"}) \n",
|
" tags = {\"demo\": \"onnx\"}) \n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -777,7 +778,7 @@
|
|||||||
"- ensured that your deep learning model is working perfectly (in the cloud) on test data, and checked it against some of your own!\n",
|
"- ensured that your deep learning model is working perfectly (in the cloud) on test data, and checked it against some of your own!\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Next steps:\n",
|
"Next steps:\n",
|
||||||
"- Check out another interesting application based on a Microsoft Research computer vision paper that lets you set up a [facial emotion recognition model](https://github.com/Azure/MachineLearningNotebooks/tree/master/onnx/onnx-inference-emotion-recognition.ipynb) in the cloud! This tutorial deploys a pre-trained ONNX Computer Vision model in an Azure ML virtual machine.\n",
|
"- Check out another interesting application based on a Microsoft Research computer vision paper that lets you set up a [facial emotion recognition model](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-inference-facial-expression-recognition-deploy.ipynb) in the cloud! This tutorial deploys a pre-trained ONNX Computer Vision model in an Azure ML virtual machine.\n",
|
||||||
"- Contribute to our [open source ONNX repository on github](http://github.com/onnx/onnx) and/or add to our [ONNX model zoo](http://github.com/onnx/models)"
|
"- Contribute to our [open source ONNX repository on github](http://github.com/onnx/onnx) and/or add to our [ONNX model zoo](http://github.com/onnx/models)"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -256,6 +256,7 @@
|
|||||||
"image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n",
|
"image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n",
|
||||||
" runtime = \"python\",\n",
|
" runtime = \"python\",\n",
|
||||||
" conda_file = \"myenv.yml\",\n",
|
" conda_file = \"myenv.yml\",\n",
|
||||||
|
" docker_file = \"Dockerfile\",\n",
|
||||||
" description = \"ONNX ResNet50 Demo\",\n",
|
" description = \"ONNX ResNet50 Demo\",\n",
|
||||||
" tags = {\"demo\": \"onnx\"}\n",
|
" tags = {\"demo\": \"onnx\"}\n",
|
||||||
" )\n",
|
" )\n",
|
||||||
|
|||||||
@@ -167,6 +167,31 @@
|
|||||||
"image.wait_for_creation(show_output = True)"
|
"image.wait_for_creation(show_output = True)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Use a custom Docker image\n",
|
||||||
|
"\n",
|
||||||
|
"You can also specify a custom Docker image to be used as base image if you don't want to use the default base image provided by Azure ML. Please make sure the custom Docker image has Ubuntu >= 16.04, Conda >= 4.5.\\* and Python(3.5.\\* or 3.6.\\*).\n",
|
||||||
|
"\n",
|
||||||
|
"Only Supported for `ContainerImage`(from azureml.core.image) with `python` runtime.\n",
|
||||||
|
"```python\n",
|
||||||
|
"# use an image available in public Container Registry without authentication\n",
|
||||||
|
"image_config.base_image = \"mcr.microsoft.com/azureml/o16n-sample-user-base/ubuntu-miniconda\"\n",
|
||||||
|
"\n",
|
||||||
|
"# or, use an image available in a private Container Registry\n",
|
||||||
|
"image_config.base_image = \"myregistry.azurecr.io/mycustomimage:1.0\"\n",
|
||||||
|
"image_config.base_image_registry.address = \"myregistry.azurecr.io\"\n",
|
||||||
|
"image_config.base_image_registry.username = \"username\"\n",
|
||||||
|
"image_config.base_image_registry.password = \"password\"\n",
|
||||||
|
"\n",
|
||||||
|
"# or, use an image built during training.\n",
|
||||||
|
"image_config.base_image = run.properties[\"AzureML.DerivedImageName\"]\n",
|
||||||
|
"```\n",
|
||||||
|
"You can get the address of training image from the properties of a Run object. Only new runs submitted with azureml-sdk>=1.0.22 to AMLCompute targets will have the 'AzureML.DerivedImageName' property. Instructions on how to get a Run can be found in [manage-runs](../../training/manage-runs/manage-runs.ipynb). \n"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -317,7 +342,7 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"authors": [
|
"authors": [
|
||||||
{
|
{
|
||||||
"name": "raymondl"
|
"name": "aashishb"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
|
|||||||
@@ -261,6 +261,31 @@
|
|||||||
"image.wait_for_creation(show_output = True)"
|
"image.wait_for_creation(show_output = True)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Use a custom Docker image\n",
|
||||||
|
"\n",
|
||||||
|
"You can also specify a custom Docker image to be used as base image if you don't want to use the default base image provided by Azure ML. Please make sure the custom Docker image has Ubuntu >= 16.04, Conda >= 4.5.\\* and Python(3.5.\\* or 3.6.\\*).\n",
|
||||||
|
"\n",
|
||||||
|
"Only Supported for `ContainerImage`(from azureml.core.image) with `python` runtime.\n",
|
||||||
|
"```python\n",
|
||||||
|
"# use an image available in public Container Registry without authentication\n",
|
||||||
|
"image_config.base_image = \"mcr.microsoft.com/azureml/o16n-sample-user-base/ubuntu-miniconda\"\n",
|
||||||
|
"\n",
|
||||||
|
"# or, use an image available in a private Container Registry\n",
|
||||||
|
"image_config.base_image = \"myregistry.azurecr.io/mycustomimage:1.0\"\n",
|
||||||
|
"image_config.base_image_registry.address = \"myregistry.azurecr.io\"\n",
|
||||||
|
"image_config.base_image_registry.username = \"username\"\n",
|
||||||
|
"image_config.base_image_registry.password = \"password\"\n",
|
||||||
|
"\n",
|
||||||
|
"# or, use an image built during training.\n",
|
||||||
|
"image_config.base_image = run.properties[\"AzureML.DerivedImageName\"]\n",
|
||||||
|
"```\n",
|
||||||
|
"You can get the address of training image from the properties of a Run object. Only new runs submitted with azureml-sdk>=1.0.22 to AMLCompute targets will have the 'AzureML.DerivedImageName' property. Instructions on how to get a Run can be found in [manage-runs](../../training/manage-runs/manage-runs.ipynb). \n"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -395,7 +420,7 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"authors": [
|
"authors": [
|
||||||
{
|
{
|
||||||
"name": "raymondl"
|
"name": "aashishb"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
|
|||||||
11
how-to-use-azureml/explain-model/README.md
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
## Using explain model APIs
|
||||||
|
|
||||||
|
Follow these sample notebooks to learn:
|
||||||
|
|
||||||
|
1. [Explain tabular data](explain-tabular-data): Basic example of explaining model trained on tabular data.
|
||||||
|
2. [Explain local classification](explain-local-sklearn-classification): Explain a scikit-learn classification model.
|
||||||
|
3. [Explain local regression](explain-local-sklearn-regression): Explain a scikit-learn regression model.
|
||||||
|
4. [Explain on remote AMLCompute](explain-on-amlcompute): Explain a model on a remote AMLCompute target.
|
||||||
|
5. [Explain classification using Run History](explain-run-history-sklearn-classification): Explain a scikit-learn classification model with Run History.
|
||||||
|
6. [Explain regression using Run History](explain-run-history-sklearn-regression): Explain a scikit-learn regression model with Run History.
|
||||||
|
7. [Explain scikit-learn raw features](explain-sklearn-raw-features): Explain the raw features of a trained scikit-learn model.
|
||||||
@@ -0,0 +1,243 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Breast cancer diagnosis classification with scikit-learn (run model explainer locally)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Explain a model with the AML explain-model package\n",
|
||||||
|
"\n",
|
||||||
|
"1. Train a SVM classification model using Scikit-learn\n",
|
||||||
|
"2. Run 'explain_model' with full data in local mode, which doesn't contact any Azure services\n",
|
||||||
|
"3. Run 'explain_model' with summarized data in local mode, which doesn't contact any Azure services"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from sklearn.datasets import load_breast_cancer\n",
|
||||||
|
"from sklearn import svm\n",
|
||||||
|
"from azureml.explain.model.tabular_explainer import TabularExplainer"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# 1. Run model explainer locally with full data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Load the breast cancer diagnosis data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"breast_cancer_data = load_breast_cancer()\n",
|
||||||
|
"classes = breast_cancer_data.target_names.tolist()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Split data into train and test\n",
|
||||||
|
"from sklearn.model_selection import train_test_split\n",
|
||||||
|
"x_train, x_test, y_train, y_test = train_test_split(breast_cancer_data.data, breast_cancer_data.target, test_size=0.2, random_state=0)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Train a SVM classification model, which you want to explain"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"clf = svm.SVC(gamma=0.001, C=100., probability=True)\n",
|
||||||
|
"model = clf.fit(x_train, y_train)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Explain predictions on your local machine"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"tabular_explainer = TabularExplainer(model, x_train, features=breast_cancer_data.feature_names, classes=classes)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Explain overall model predictions (global explanation)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Passing in test dataset for evaluation examples - note it must be a representative sample of the original data\n",
|
||||||
|
"# x_train can be passed as well, but with more examples explanations will take longer although they may be more accurate\n",
|
||||||
|
"global_explanation = tabular_explainer.explain_global(x_test)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Sorted SHAP values\n",
|
||||||
|
"print('ranked global importance values: {}'.format(global_explanation.get_ranked_global_values()))\n",
|
||||||
|
"# Corresponding feature names\n",
|
||||||
|
"print('ranked global importance names: {}'.format(global_explanation.get_ranked_global_names()))\n",
|
||||||
|
"# feature ranks (based on original order of features)\n",
|
||||||
|
"print('global importance rank: {}'.format(global_explanation.global_importance_rank))\n",
|
||||||
|
"# per class feature names\n",
|
||||||
|
"print('ranked per class feature names: {}'.format(global_explanation.get_ranked_per_class_names()))\n",
|
||||||
|
"# per class feature importance values\n",
|
||||||
|
"print('ranked per class feature values: {}'.format(global_explanation.get_ranked_per_class_values()))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"dict(zip(global_explanation.get_ranked_global_names(), global_explanation.get_ranked_global_values()))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Explain overall model predictions as a collection of local (instance-level) explanations"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# feature shap values for all features and all data points in the training data\n",
|
||||||
|
"print('local importance values: {}'.format(global_explanation.local_importance_values))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Explain local data points (individual instances)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"local_explanation = tabular_explainer.explain_local(x_test[0,:])"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# local feature importance information\n",
|
||||||
|
"local_importance_values = local_explanation.local_importance_values\n",
|
||||||
|
"print('local importance for first instance: {}'.format(local_importance_values[y_test[0]]))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print('local importance feature names: {}'.format(list(local_explanation.features)))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"dict(zip(local_explanation.features, local_explanation.local_importance_values[y_test[0]]))"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "wamartin"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.6",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python36"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.6.8"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
|
}
|
||||||
@@ -0,0 +1,231 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Boston Housing Price Prediction with scikit-learn (run model explainer locally)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Explain a model with the AML explain-model package\n",
|
||||||
|
"\n",
|
||||||
|
"1. Train a GradientBoosting regression model using Scikit-learn\n",
|
||||||
|
"2. Run 'explain_model' with full dataset in local mode, which doesn't contact any Azure services.\n",
|
||||||
|
"3. Run 'explain_model' with summarized dataset in local mode, which doesn't contact any Azure services."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from sklearn import datasets\n",
|
||||||
|
"from sklearn.ensemble import GradientBoostingRegressor\n",
|
||||||
|
"from azureml.explain.model.tabular_explainer import TabularExplainer"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# 1. Run model explainer locally with full data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Load the Boston house price data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"boston_data = datasets.load_boston()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Split data into train and test\n",
|
||||||
|
"from sklearn.model_selection import train_test_split\n",
|
||||||
|
"x_train, x_test, y_train, y_test = train_test_split(boston_data.data, boston_data.target, test_size=0.2, random_state=0)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Train a GradientBoosting Regression model, which you want to explain"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,\n",
|
||||||
|
" learning_rate=0.1, loss='huber',\n",
|
||||||
|
" random_state=1)\n",
|
||||||
|
"model = clf.fit(x_train, y_train)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Explain predictions on your local machine"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"tabular_explainer = TabularExplainer(model, x_train, features = boston_data.feature_names)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Explain overall model predictions (global explanation)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Passing in test dataset for evaluation examples - note it must be a representative sample of the original data\n",
|
||||||
|
"# x_train can be passed as well, but with more examples explanations will take longer although they may be more accurate\n",
|
||||||
|
"global_explanation = tabular_explainer.explain_global(x_test)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"help(global_explanation)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Sorted SHAP values \n",
|
||||||
|
"print('ranked global importance values: {}'.format(global_explanation.get_ranked_global_values()))\n",
|
||||||
|
"# Corresponding feature names\n",
|
||||||
|
"print('ranked global importance names: {}'.format(global_explanation.get_ranked_global_names()))\n",
|
||||||
|
"# feature ranks (based on original order of features)\n",
|
||||||
|
"print('global importance rank: {}'.format(global_explanation.global_importance_rank))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"dict(zip(global_explanation.get_ranked_global_names(), global_explanation.get_ranked_global_values()))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Explain overall model predictions as a collection of local (instance-level) explanations"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# feature shap values for all features and all data points in the training data\n",
|
||||||
|
"print('local importance values: {}'.format(global_explanation.local_importance_values))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Explain local data points (individual instances)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"local_explanation = tabular_explainer.explain_local(x_test[0,:])"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# local feature importance information\n",
|
||||||
|
"local_importance_values = local_explanation.local_importance_values\n",
|
||||||
|
"print('local importance values: {}'.format(local_importance_values))"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "wamartin"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.6",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python36"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.6.8"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
|
}
|
||||||
@@ -0,0 +1,602 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Train using Azure Machine Learning Compute\n",
|
||||||
|
"\n",
|
||||||
|
"* Initialize a Workspace\n",
|
||||||
|
"* Create an Experiment\n",
|
||||||
|
"* Introduction to AmlCompute\n",
|
||||||
|
"* Submit an AmlCompute run in a few different ways\n",
|
||||||
|
" - Provision as a run based compute target \n",
|
||||||
|
" - Provision as a persistent compute target (Basic)\n",
|
||||||
|
" - Provision as a persistent compute target (Advanced)\n",
|
||||||
|
"* Additional operations to perform on AmlCompute\n",
|
||||||
|
"* Download model explanation data from the Run History Portal\n",
|
||||||
|
"* Print the explanation data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Prerequisites\n",
|
||||||
|
"Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Check core SDK version number\n",
|
||||||
|
"import azureml.core\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Initialize a Workspace\n",
|
||||||
|
"\n",
|
||||||
|
"Initialize a workspace object from persisted configuration"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"tags": [
|
||||||
|
"create workspace"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core import Workspace\n",
|
||||||
|
"\n",
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\\n')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Create An Experiment\n",
|
||||||
|
"\n",
|
||||||
|
"**Experiment** is a logical container in an Azure ML Workspace. It hosts run records which can include run metrics and output artifacts from your experiments."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core import Experiment\n",
|
||||||
|
"experiment_name = 'explainer-remote-run-on-amlcompute'\n",
|
||||||
|
"experiment = Experiment(workspace=ws, name=experiment_name)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Introduction to AmlCompute\n",
|
||||||
|
"\n",
|
||||||
|
"Azure Machine Learning Compute is managed compute infrastructure that allows the user to easily create single to multi-node compute of the appropriate VM Family. It is created **within your workspace region** and is a resource that can be used by other users in your workspace. It autoscales by default to the max_nodes, when a job is submitted, and executes in a containerized environment packaging the dependencies as specified by the user. \n",
|
||||||
|
"\n",
|
||||||
|
"Since it is managed compute, job scheduling and cluster management are handled internally by Azure Machine Learning service. \n",
|
||||||
|
"\n",
|
||||||
|
"For more information on Azure Machine Learning Compute, please read [this article](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)\n",
|
||||||
|
"\n",
|
||||||
|
"If you are an existing BatchAI customer who is migrating to Azure Machine Learning, please read [this article](https://aka.ms/batchai-retirement)\n",
|
||||||
|
"\n",
|
||||||
|
"**Note**: As with other Azure services, there are limits on certain resources (for eg. AmlCompute quota) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"The training script `run_explainer.py` is already created for you. Let's have a look."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Submit an AmlCompute run in a few different ways\n",
|
||||||
|
"\n",
|
||||||
|
"First let's check which VM families are available in your region. Azure is a regional service and some specialized SKUs (especially GPUs) are only available in certain regions. Since AmlCompute is created in the region of your workspace, we will use the supported_vmsizes() function to see if the VM family we want to use ('STANDARD_D2_V2') is supported.\n",
|
||||||
|
"\n",
|
||||||
|
"You can also pass a different region to check availability and then re-create your workspace in that region through the [configuration notebook](../../../configuration.ipynb)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
|
"\n",
|
||||||
|
"AmlCompute.supported_vmsizes(workspace=ws)\n",
|
||||||
|
"# AmlCompute.supported_vmsizes(workspace=ws, location='southcentralus')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Create project directory\n",
|
||||||
|
"\n",
|
||||||
|
"Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script, and any additional files your training script depends on"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os\n",
|
||||||
|
"import shutil\n",
|
||||||
|
"\n",
|
||||||
|
"project_folder = './explainer-remote-run-on-amlcompute'\n",
|
||||||
|
"os.makedirs(project_folder, exist_ok=True)\n",
|
||||||
|
"shutil.copy('run_explainer.py', project_folder)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Provision as a run based compute target\n",
|
||||||
|
"\n",
|
||||||
|
"You can provision AmlCompute as a compute target at run-time. In this case, the compute is auto-created for your run, scales up to max_nodes that you specify, and then **deleted automatically** after the run completes."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.runconfig import RunConfiguration\n",
|
||||||
|
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||||
|
"from azureml.core.runconfig import DEFAULT_CPU_IMAGE\n",
|
||||||
|
"\n",
|
||||||
|
"# create a new runconfig object\n",
|
||||||
|
"run_config = RunConfiguration()\n",
|
||||||
|
"\n",
|
||||||
|
"# signal that you want to use AmlCompute to execute script.\n",
|
||||||
|
"run_config.target = \"amlcompute\"\n",
|
||||||
|
"\n",
|
||||||
|
"# AmlCompute will be created in the same region as workspace\n",
|
||||||
|
"# Set vm size for AmlCompute\n",
|
||||||
|
"run_config.amlcompute.vm_size = 'STANDARD_D2_V2'\n",
|
||||||
|
"\n",
|
||||||
|
"# enable Docker \n",
|
||||||
|
"run_config.environment.docker.enabled = True\n",
|
||||||
|
"\n",
|
||||||
|
"# set Docker base image to the default CPU-based image\n",
|
||||||
|
"run_config.environment.docker.base_image = DEFAULT_CPU_IMAGE\n",
|
||||||
|
"\n",
|
||||||
|
"# use conda_dependencies.yml to create a conda environment in the Docker image for execution\n",
|
||||||
|
"run_config.environment.python.user_managed_dependencies = False\n",
|
||||||
|
"\n",
|
||||||
|
"# auto-prepare the Docker image when used for execution (if it is not already prepared)\n",
|
||||||
|
"run_config.auto_prepare_environment = True\n",
|
||||||
|
"\n",
|
||||||
|
"azureml_pip_packages = [\n",
|
||||||
|
" 'azureml-defaults', 'azureml-contrib-explain-model', 'azureml-core', 'azureml-telemetry',\n",
|
||||||
|
" 'azureml-explain-model'\n",
|
||||||
|
"]\n",
|
||||||
|
"\n",
|
||||||
|
"# specify CondaDependencies obj\n",
|
||||||
|
"run_config.environment.python.conda_dependencies = CondaDependencies.create(conda_packages=['scikit-learn'],\n",
|
||||||
|
" pip_packages=azureml_pip_packages)\n",
|
||||||
|
"\n",
|
||||||
|
"# Now submit a run on AmlCompute\n",
|
||||||
|
"from azureml.core.script_run_config import ScriptRunConfig\n",
|
||||||
|
"\n",
|
||||||
|
"script_run_config = ScriptRunConfig(source_directory=project_folder,\n",
|
||||||
|
" script='run_explainer.py',\n",
|
||||||
|
" run_config=run_config)\n",
|
||||||
|
"\n",
|
||||||
|
"run = experiment.submit(script_run_config)\n",
|
||||||
|
"\n",
|
||||||
|
"# Show run details\n",
|
||||||
|
"run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run)."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%time\n",
|
||||||
|
"# Shows output of the run on stdout.\n",
|
||||||
|
"run.wait_for_completion(show_output=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Provision as a persistent compute target (Basic)\n",
|
||||||
|
"\n",
|
||||||
|
"You can provision a persistent AmlCompute resource by simply defining two parameters thanks to smart defaults. By default it autoscales from 0 nodes and provisions dedicated VMs to run your job in a container. This is useful when you want to continuously re-use the same target, debug it between jobs or simply share the resource with other users of your workspace.\n",
|
||||||
|
"\n",
|
||||||
|
"* `vm_size`: VM family of the nodes provisioned by AmlCompute. Simply choose from the supported_vmsizes() above\n",
|
||||||
|
"* `max_nodes`: Maximum nodes to autoscale to while running a job on AmlCompute"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
|
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||||
|
"\n",
|
||||||
|
"# Choose a name for your CPU cluster\n",
|
||||||
|
"cpu_cluster_name = \"cpucluster\"\n",
|
||||||
|
"\n",
|
||||||
|
"# Verify that cluster does not exist already\n",
|
||||||
|
"try:\n",
|
||||||
|
" cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||||
|
" print('Found existing cluster, use it.')\n",
|
||||||
|
"except ComputeTargetException:\n",
|
||||||
|
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
|
||||||
|
" max_nodes=4)\n",
|
||||||
|
" cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||||
|
"\n",
|
||||||
|
"cpu_cluster.wait_for_completion(show_output=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Configure & Run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.runconfig import RunConfiguration\n",
|
||||||
|
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||||
|
"\n",
|
||||||
|
"# create a new RunConfig object\n",
|
||||||
|
"run_config = RunConfiguration(framework=\"python\")\n",
|
||||||
|
"\n",
|
||||||
|
"# Set compute target to AmlCompute target created in previous step\n",
|
||||||
|
"run_config.target = cpu_cluster.name\n",
|
||||||
|
"\n",
|
||||||
|
"# enable Docker \n",
|
||||||
|
"run_config.environment.docker.enabled = True\n",
|
||||||
|
"\n",
|
||||||
|
"azureml_pip_packages = [\n",
|
||||||
|
" 'azureml-defaults', 'azureml-contrib-explain-model', 'azureml-core', 'azureml-telemetry',\n",
|
||||||
|
" 'azureml-explain-model'\n",
|
||||||
|
"]\n",
|
||||||
|
"\n",
|
||||||
|
"# specify CondaDependencies obj\n",
|
||||||
|
"run_config.environment.python.conda_dependencies = CondaDependencies.create(conda_packages=['scikit-learn'],\n",
|
||||||
|
" pip_packages=azureml_pip_packages)\n",
|
||||||
|
"\n",
|
||||||
|
"from azureml.core import Run\n",
|
||||||
|
"from azureml.core import ScriptRunConfig\n",
|
||||||
|
"\n",
|
||||||
|
"src = ScriptRunConfig(source_directory=project_folder, \n",
|
||||||
|
" script='run_explainer.py', \n",
|
||||||
|
" run_config=run_config) \n",
|
||||||
|
"run = experiment.submit(config=src)\n",
|
||||||
|
"run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%time\n",
|
||||||
|
"# Shows output of the run on stdout.\n",
|
||||||
|
"run.wait_for_completion(show_output=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"run.get_metrics()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Provision as a persistent compute target (Advanced)\n",
|
||||||
|
"\n",
|
||||||
|
"You can also specify additional properties or change defaults while provisioning AmlCompute using a more advanced configuration. This is useful when you want a dedicated cluster of 4 nodes (for example you can set the min_nodes and max_nodes to 4), or want the compute to be within an existing VNet in your subscription.\n",
|
||||||
|
"\n",
|
||||||
|
"In addition to `vm_size` and `max_nodes`, you can specify:\n",
|
||||||
|
"* `min_nodes`: Minimum nodes (default 0 nodes) to downscale to while running a job on AmlCompute\n",
|
||||||
|
"* `vm_priority`: Choose between 'dedicated' (default) and 'lowpriority' VMs when provisioning AmlCompute. Low Priority VMs use Azure's excess capacity and are thus cheaper but risk your run being pre-empted\n",
|
||||||
|
"* `idle_seconds_before_scaledown`: Idle time (default 120 seconds) to wait after run completion before auto-scaling to min_nodes\n",
|
||||||
|
"* `vnet_resourcegroup_name`: Resource group of the **existing** VNet within which AmlCompute should be provisioned\n",
|
||||||
|
"* `vnet_name`: Name of VNet\n",
|
||||||
|
"* `subnet_name`: Name of SubNet within the VNet"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
|
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||||
|
"\n",
|
||||||
|
"# Choose a name for your CPU cluster\n",
|
||||||
|
"cpu_cluster_name = \"cpucluster\"\n",
|
||||||
|
"\n",
|
||||||
|
"# Verify that cluster does not exist already\n",
|
||||||
|
"try:\n",
|
||||||
|
" cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||||
|
" print('Found existing cluster, use it.')\n",
|
||||||
|
"except ComputeTargetException:\n",
|
||||||
|
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
|
||||||
|
" vm_priority='lowpriority',\n",
|
||||||
|
" min_nodes=2,\n",
|
||||||
|
" max_nodes=4,\n",
|
||||||
|
" idle_seconds_before_scaledown='300',\n",
|
||||||
|
" vnet_resourcegroup_name='<my-resource-group>',\n",
|
||||||
|
" vnet_name='<my-vnet-name>',\n",
|
||||||
|
" subnet_name='<my-subnet-name>')\n",
|
||||||
|
" cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||||
|
"\n",
|
||||||
|
"cpu_cluster.wait_for_completion(show_output=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Configure & Run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.runconfig import RunConfiguration\n",
|
||||||
|
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||||
|
"\n",
|
||||||
|
"# create a new RunConfig object\n",
|
||||||
|
"run_config = RunConfiguration(framework=\"python\")\n",
|
||||||
|
"\n",
|
||||||
|
"# Set compute target to AmlCompute target created in previous step\n",
|
||||||
|
"run_config.target = cpu_cluster.name\n",
|
||||||
|
"\n",
|
||||||
|
"# enable Docker \n",
|
||||||
|
"run_config.environment.docker.enabled = True\n",
|
||||||
|
"\n",
|
||||||
|
"azureml_pip_packages = [\n",
|
||||||
|
" 'azureml-defaults', 'azureml-contrib-explain-model', 'azureml-core', 'azureml-telemetry',\n",
|
||||||
|
" 'azureml-explain-model'\n",
|
||||||
|
"]\n",
|
||||||
|
"\n",
|
||||||
|
"# specify CondaDependencies obj\n",
|
||||||
|
"run_config.environment.python.conda_dependencies = CondaDependencies.create(conda_packages=['scikit-learn'],\n",
|
||||||
|
" pip_packages=azureml_pip_packages)\n",
|
||||||
|
"\n",
|
||||||
|
"from azureml.core import Run\n",
|
||||||
|
"from azureml.core import ScriptRunConfig\n",
|
||||||
|
"\n",
|
||||||
|
"src = ScriptRunConfig(source_directory=project_folder, \n",
|
||||||
|
" script='run_explainer.py', \n",
|
||||||
|
" run_config=run_config) \n",
|
||||||
|
"run = experiment.submit(config=src)\n",
|
||||||
|
"run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%time\n",
|
||||||
|
"# Shows output of the run on stdout.\n",
|
||||||
|
"run.wait_for_completion(show_output=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"run.get_metrics()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.contrib.explain.model.explanation.explanation_client import ExplanationClient\n",
|
||||||
|
"\n",
|
||||||
|
"client = ExplanationClient.from_run(run)\n",
|
||||||
|
"# Get the top k (e.g., 4) most important features with their importance values\n",
|
||||||
|
"explanation = client.download_model_explanation(top_k=4)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Additional operations to perform on AmlCompute\n",
|
||||||
|
"\n",
|
||||||
|
"You can perform more operations on AmlCompute such as updating the node counts or deleting the compute. "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Get_status () gets the latest status of the AmlCompute target\n",
|
||||||
|
"cpu_cluster.get_status().serialize()\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Update () takes in the min_nodes, max_nodes and idle_seconds_before_scaledown and updates the AmlCompute target\n",
|
||||||
|
"# cpu_cluster.update(min_nodes=1)\n",
|
||||||
|
"# cpu_cluster.update(max_nodes=10)\n",
|
||||||
|
"cpu_cluster.update(idle_seconds_before_scaledown=300)\n",
|
||||||
|
"# cpu_cluster.update(min_nodes=2, max_nodes=4, idle_seconds_before_scaledown=600)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Delete () is used to deprovision and delete the AmlCompute target. Useful if you want to re-use the compute name \n",
|
||||||
|
"# 'cpucluster' in this case but use a different VM family for instance.\n",
|
||||||
|
"\n",
|
||||||
|
"# cpu_cluster.delete()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Download Model Explanation Data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.contrib.explain.model.explanation.explanation_client import ExplanationClient\n",
|
||||||
|
"\n",
|
||||||
|
"# Get model explanation data\n",
|
||||||
|
"client = ExplanationClient.from_run(run)\n",
|
||||||
|
"explanation = client.download_model_explanation()\n",
|
||||||
|
"local_importance_values = explanation.local_importance_values\n",
|
||||||
|
"expected_values = explanation.expected_values\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Or you can use the saved run.id to retrieve the feature importance values\n",
|
||||||
|
"client = ExplanationClient.from_run_id(ws, experiment_name, run.id)\n",
|
||||||
|
"explanation = client.download_model_explanation()\n",
|
||||||
|
"local_importance_values = explanation.local_importance_values\n",
|
||||||
|
"expected_values = explanation.expected_values"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Get the top k (e.g., 4) most important features with their importance values\n",
|
||||||
|
"explanation = client.download_model_explanation(top_k=4)\n",
|
||||||
|
"global_importance_values = explanation.get_ranked_global_values()\n",
|
||||||
|
"global_importance_names = explanation.get_ranked_global_names()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print('global importance values: {}'.format(global_importance_values))\n",
|
||||||
|
"print('global importance names: {}'.format(global_importance_names))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Success!\n",
|
||||||
|
"Great, you are ready to move on to the remaining notebooks."
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "wamartin"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.6",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python36"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.6.8"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
|
}
|
||||||
@@ -0,0 +1,52 @@
|
|||||||
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.

# Remote training script: trains a Ridge regression model on the Boston
# housing dataset, explains its predictions with TabularExplainer, saves the
# model to ./outputs (auto-uploaded by Azure ML), and uploads the global
# explanation to Run History via ExplanationClient.

from sklearn import datasets
from sklearn.linear_model import Ridge
from azureml.explain.model.tabular_explainer import TabularExplainer
from azureml.contrib.explain.model.explanation.explanation_client import ExplanationClient
from sklearn.model_selection import train_test_split
from azureml.core.run import Run
from sklearn.externals import joblib
import os
import numpy as np

# ./outputs is special: Azure ML uploads its contents to run history automatically
os.makedirs('./outputs', exist_ok=True)

boston_data = datasets.load_boston()

run = Run.get_context()
client = ExplanationClient.from_run(run)

X_train, X_test, y_train, y_test = train_test_split(boston_data.data,
                                                    boston_data.target,
                                                    test_size=0.2,
                                                    random_state=0)

alpha = 0.5
# Use Ridge algorithm to create a regression model
reg = Ridge(alpha)
model = reg.fit(X_train, y_train)

# NOTE(review): predictions are computed but never logged or returned — kept
# for parity with the original sample; confirm whether they should be logged.
preds = reg.predict(X_test)
run.log('alpha', alpha)

model_file_name = 'ridge_{0:.2f}.pkl'.format(alpha)
# Save the model in the outputs folder so it automatically gets uploaded.
# Fix: the original wrapped this in `with open(model_file_name, 'wb')`, which
# created an unused empty file in the working directory — joblib.dump opens
# its own handle at the target path, so no outer open() is needed.
joblib.dump(value=reg, filename=os.path.join('./outputs/', model_file_name))

# Explain predictions on your local machine
tabular_explainer = TabularExplainer(model, X_train, features=boston_data.feature_names)

# Explain overall model predictions (global explanation)
# Passing in test dataset for evaluation examples - note it must be a representative sample of the original data
# x_train can be passed as well, but with more examples explanations it will
# take longer although they may be more accurate
global_explanation = tabular_explainer.explain_global(X_test)

# Uploading model explanation data for storage or visualization in webUX
# The explanation can then be downloaded on any compute
comment = 'Global explanation on regression model trained on boston dataset'
client.upload_model_explanation(global_explanation, comment=comment)
|
||||||