Compare commits

...

29 Commits

Author SHA1 Message Date
amlrelsa-ms
71e061b193 update samples from Release-114 as a part of 1.38.0 SDK stable release 2022-02-16 16:32:55 +00:00
Harneet Virk
9094da4085 Merge pull request #1684 from Azure/release_update/Release-122
update samples from Release-122 as a part of  SDK release
2022-02-14 11:38:49 -08:00
amlrelsa-ms
ebf9d2855c update samples from Release-122 as a part of SDK release 2022-02-14 19:24:27 +00:00
v-pbavanari
1bbd78eb33 update samples from Release-121 as a part of SDK release (#1678)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2022-02-02 12:28:49 -05:00
v-pbavanari
77f5a69e04 update samples from Release-120 as a part of SDK release (#1676)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2022-01-28 12:51:49 -05:00
raja7592
ce82af2ab0 update samples from Release-118 as a part of SDK release (#1673)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2022-01-24 20:07:35 -05:00
Harneet Virk
2a2d2efa17 Merge pull request #1658 from Azure/release_update/Release-117
Update samples from Release sdk 1.37.0 as a part of  SDK release
2021-12-13 10:36:08 -08:00
amlrelsa-ms
dd494e9cac update samples from Release-117 as a part of SDK release 2021-12-13 16:57:22 +00:00
Harneet Virk
352adb7487 Merge pull request #1629 from Azure/release_update/Release-116
Update samples from Release as a part of SDK release 1.36.0
2021-11-08 09:48:25 -08:00
amlrelsa-ms
aebe34b4e8 update samples from Release-116 as a part of SDK release 2021-11-08 16:09:41 +00:00
Harneet Virk
c7e1241e20 Merge pull request #1612 from Azure/release_update/Release-115
Update samples from Release-115 as a part of  SDK release
2021-10-11 12:01:59 -07:00
amlrelsa-ms
6529298c24 update samples from Release-115 as a part of SDK release 2021-10-11 16:09:57 +00:00
Harneet Virk
e2dddfde85 Merge pull request #1601 from Azure/release_update/Release-114
update samples from Release-114 as a part of  SDK release
2021-09-29 14:21:59 -07:00
amlrelsa-ms
36d96f96ec update samples from Release-114 as a part of SDK release 2021-09-29 20:16:51 +00:00
Harneet Virk
7ebcfea5a3 Merge pull request #1600 from Azure/release_update/Release-113
update samples from Release-113 as a part of  SDK release
2021-09-28 12:53:57 -07:00
amlrelsa-ms
b20bfed33a update samples from Release-113 as a part of SDK release 2021-09-28 19:44:58 +00:00
Harneet Virk
a66a92e338 Merge pull request #1597 from Azure/release_update/Release-112
update samples from Release-112 as a part of  SDK release
2021-09-24 14:44:53 -07:00
amlrelsa-ms
c56c2c3525 update samples from Release-112 as a part of SDK release 2021-09-24 21:40:44 +00:00
Harneet Virk
4cac072fa4 Merge pull request #1588 from Azure/release_update/Release-111
Update samples from Release-111 as a part of SDK 1.34.0 release
2021-09-09 09:02:38 -07:00
amlrelsa-ms
aeab6b3e28 update samples from Release-111 as a part of SDK release 2021-09-07 17:32:15 +00:00
Harneet Virk
015e261f29 Merge pull request #1581 from Azure/release_update/Release-110
update samples from Release-110 as a part of  SDK release
2021-08-20 09:21:08 -07:00
amlrelsa-ms
d2a423dde9 update samples from Release-110 as a part of SDK release 2021-08-20 00:28:42 +00:00
Harneet Virk
3ecbfd6532 Merge pull request #1578 from Azure/release_update/Release-109
update samples from Release-109 as a part of  SDK release
2021-08-18 18:16:31 -07:00
amlrelsa-ms
02ecb2d755 update samples from Release-109 as a part of SDK release 2021-08-18 22:07:12 +00:00
Harneet Virk
122df6e846 Merge pull request #1576 from Azure/release_update/Release-108
update samples from Release-108 as a part of  SDK release
2021-08-18 09:47:34 -07:00
amlrelsa-ms
7d6a0a2051 update samples from Release-108 as a part of SDK release 2021-08-18 00:33:54 +00:00
Harneet Virk
6cc8af80a2 Merge pull request #1565 from Azure/release_update/Release-107
update samples from Release-107 as a part of  SDK release 1.33
2021-08-02 13:14:30 -07:00
amlrelsa-ms
f61898f718 update samples from Release-107 as a part of SDK release 2021-08-02 18:01:38 +00:00
Harneet Virk
5cb465171e Merge pull request #1556 from Azure/update-spark-notebook
updating spark notebook
2021-07-26 17:09:42 -07:00
189 changed files with 59430 additions and 8746 deletions

View File

@@ -103,7 +103,7 @@
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.32.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -6,4 +6,4 @@ dependencies:
- fairlearn>=0.6.2
- joblib
- liac-arff
- raiwidgets~=0.7.0
- raiwidgets~=0.16.0
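
The ~= ("compatible release") pin accepts any 0.16.x release of raiwidgets but rejects 0.17 and later. A quick way to check what a specifier admits, assuming the packaging library is available:

from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=0.16.0")  # equivalent to >=0.16.0, ==0.16.*
print("0.16.5" in spec)  # True
print("0.17.0" in spec)  # False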

View File

@@ -6,4 +6,4 @@ dependencies:
- fairlearn>=0.6.2
- joblib
- liac-arff
- raiwidgets~=0.7.0
- raiwidgets~=0.16.0

View File

@@ -4,7 +4,6 @@ dependencies:
# Currently Azure ML only supports 3.5.2 and later.
- pip==21.1.2
- python>=3.5.2,<3.8
- nb_conda
- boto3==1.15.18
- matplotlib==2.1.0
- numpy==1.18.5
@@ -18,11 +17,13 @@ dependencies:
- holidays==0.9.11
- pytorch::pytorch=1.4.0
- cudatoolkit=10.1.243
- tornado==6.1.0
- pip:
# Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.32.0
- azureml-widgets~=1.38.0
- pytorch-transformers==1.0.0
- spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlresources-prod.azureedge.net/validated-requirements/1.32.0/validated_win32_requirements.txt [--no-deps]
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.38.0/validated_win32_requirements.txt [--no-deps]
- arch==4.14

View File

@@ -4,7 +4,6 @@ dependencies:
# Currently Azure ML only supports 3.5.2 and later.
- pip==21.1.2
- python>=3.5.2,<3.8
- nb_conda
- boto3==1.15.18
- matplotlib==2.1.0
- numpy==1.18.5
@@ -18,11 +17,13 @@ dependencies:
- holidays==0.9.11
- pytorch::pytorch=1.4.0
- cudatoolkit=10.1.243
- tornado==6.1.0
- pip:
# Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.32.0
- azureml-widgets~=1.38.0
- pytorch-transformers==1.0.0
- spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlresources-prod.azureedge.net/validated-requirements/1.32.0/validated_linux_requirements.txt [--no-deps]
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.38.0/validated_linux_requirements.txt [--no-deps]
- arch==4.14

View File

@@ -5,7 +5,6 @@ dependencies:
- pip==21.1.2
- nomkl
- python>=3.5.2,<3.8
- nb_conda
- boto3==1.15.18
- matplotlib==2.1.0
- numpy==1.18.5
@@ -19,11 +18,13 @@ dependencies:
- holidays==0.9.11
- pytorch::pytorch=1.4.0
- cudatoolkit=9.0
- tornado==6.1.0
- pip:
# Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.32.0
- azureml-widgets~=1.38.0
- pytorch-transformers==1.0.0
- spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlresources-prod.azureedge.net/validated-requirements/1.32.0/validated_darwin_requirements.txt [--no-deps]
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.38.0/validated_darwin_requirements.txt [--no-deps]
- arch==4.14

View File

@@ -3,7 +3,7 @@ import platform
try:
import conda
except:
except Exception:
print('Failed to import conda.')
print('This setup is usually run from the base conda environment.')
print('You can activate the base environment using the command "conda activate base"')
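
The switch from a bare except to except Exception matters because a bare except also traps exiting exceptions such as SystemExit and KeyboardInterrupt, so Ctrl-C could be swallowed during setup. A minimal illustration (hypothetical, not from the repo):

import sys

try:
    sys.exit(0)  # raises SystemExit
except:  # bare except catches it, so the program keeps running
    print("bare except swallowed SystemExit")

try:
    sys.exit(0)
except Exception:  # SystemExit derives from BaseException, not Exception
    print("not reached")  # the exception propagates and the script exits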

View File

@@ -1,497 +1,483 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Classification of credit card fraudulent transactions on remote compute **_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Train](#Train)\n",
"1. [Results](#Results)\n",
"1. [Test](#Test)\n",
"1. [Acknowledgements](#Acknowledgements)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"\n",
"In this example we use the associated credit card dataset to showcase how you can use AutoML for a simple classification problem. The goal is to predict if a credit card transaction is considered a fraudulent charge.\n",
"\n",
"This notebook is using remote compute to train the model.\n",
"\n",
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
"\n",
"In this notebook you will learn how to:\n",
"1. Create an experiment using an existing workspace.\n",
"2. Configure AutoML using `AutoMLConfig`.\n",
"3. Train the model using remote compute.\n",
"4. Explore the results.\n",
"5. Test the fitted model."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"from matplotlib import pyplot as plt\n",
"import pandas as pd\n",
"import os\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.dataset import Dataset\n",
"from azureml.train.automl import AutoMLConfig"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.32.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for experiment\n",
"experiment_name = 'automl-classification-ccard-remote'\n",
"\n",
"experiment=Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create or Attach existing AmlCompute\n",
"A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"cpu_cluster_name = \"cpu-cluster-1\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
" max_nodes=6)\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load Data\n",
"\n",
"Load the credit card dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
"training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
"label_column_name = 'Class'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|classification or regression|\n",
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
"|**enable_early_stopping**|Stop the run if the metric score is not showing improvement.|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**training_data**|Input dataset, containing both features and label column.|\n",
"|**label_column_name**|The name of the label column.|\n",
"\n",
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_settings = {\n",
" \"n_cross_validations\": 3,\n",
" \"primary_metric\": 'average_precision_score_weighted',\n",
" \"enable_early_stopping\": True,\n",
" \"max_concurrent_iterations\": 2, # This is a limit for testing purpose, please increase it as per cluster size\n",
" \"experiment_timeout_hours\": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible\n",
" \"verbosity\": logging.INFO,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(task = 'classification',\n",
" debug_log = 'automl_errors.log',\n",
" compute_target = compute_target,\n",
" training_data = training_data,\n",
" label_column_name = label_column_name,\n",
" **automl_settings\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run = experiment.submit(automl_config, show_output = False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# If you need to retrieve a run that already started, use the following code\n",
"#from azureml.train.automl.run import AutoMLRun\n",
"#remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Widget for Monitoring Runs\n",
"\n",
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
"\n",
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"widget-rundetails-sample"
]
},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(remote_run).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Explain model\n",
"\n",
"Automated ML models can be explained and visualized using the SDK Explainability library. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Analyze results\n",
"\n",
"### Retrieve the Best Model\n",
"\n",
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = remote_run.get_output()\n",
"fitted_model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Print the properties of the model\n",
"The fitted_model is a python object and you can read the different properties of the object.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the fitted model\n",
"\n",
"Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# convert the test data to dataframe\n",
"X_test_df = validation_data.drop_columns(columns=[label_column_name]).to_pandas_dataframe()\n",
"y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True).to_pandas_dataframe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# call the predict functions on the model\n",
"y_pred = fitted_model.predict(X_test_df)\n",
"y_pred"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Calculate metrics for the prediction\n",
"\n",
"Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values \n",
"from the trained model that was returned."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.metrics import confusion_matrix\n",
"import numpy as np\n",
"import itertools\n",
"\n",
"cf =confusion_matrix(y_test_df.values,y_pred)\n",
"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n",
"plt.colorbar()\n",
"plt.title('Confusion Matrix')\n",
"plt.xlabel('Predicted')\n",
"plt.ylabel('Actual')\n",
"class_labels = ['False','True']\n",
"tick_marks = np.arange(len(class_labels))\n",
"plt.xticks(tick_marks,class_labels)\n",
"plt.yticks([-0.5,0,1,1.5],['','False','True',''])\n",
"# plotting text value inside cells\n",
"thresh = cf.max() / 2.\n",
"for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])):\n",
" plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Acknowledgements"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud\n",
"\n",
"The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Universit\u00c3\u00a9 Libre de Bruxelles) on big data mining and fraud detection.\n",
"More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\n",
"\n",
"Please cite the following works:\n",
"\n",
"Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n",
"\n",
"Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon\n",
"\n",
"Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\n",
"\n",
"Dal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\n",
"\n",
"Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-A\u00c3\u00abl; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\n",
"\n",
"Carcillo, Fabrizio; Le Borgne, Yann-A\u00c3\u00abl; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing\n",
"\n",
"Bertrand Lebichot, Yann-A\u00c3\u00abl Le Borgne, Liyun He, Frederic Obl\u00c3\u00a9, Gianluca Bontempi Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection, INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\n",
"\n",
"Fabrizio Carcillo, Yann-A\u00c3\u00abl Le Borgne, Olivier Caelen, Frederic Obl\u00c3\u00a9, Gianluca Bontempi Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection Information Sciences, 2019"
]
}
],
"metadata": {
"authors": [
{
"name": "ratanase"
}
],
"category": "tutorial",
"compute": [
"AML Compute"
],
"datasets": [
"Creditcard"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"file_extension": ".py",
"framework": [
"None"
],
"friendly_name": "Classification of credit card fraudulent transactions using Automated ML",
"index_order": 5,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
},
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"tags": [
"remote_run",
"AutomatedML"
],
"task": "Classification",
"version": "3.6.7"
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Classification of credit card fraudulent transactions on remote compute **_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Train](#Train)\n",
"1. [Results](#Results)\n",
"1. [Test](#Test)\n",
"1. [Acknowledgements](#Acknowledgements)"
]
},
"nbformat": 4,
"nbformat_minor": 2
}
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"\n",
"In this example we use the associated credit card dataset to showcase how you can use AutoML for a simple classification problem. The goal is to predict if a credit card transaction is considered a fraudulent charge.\n",
"\n",
"This notebook is using remote compute to train the model.\n",
"\n",
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
"\n",
"In this notebook you will learn how to:\n",
"1. Create an experiment using an existing workspace.\n",
"2. Configure AutoML using `AutoMLConfig`.\n",
"3. Train the model using remote compute.\n",
"4. Explore the results.\n",
"5. Test the fitted model."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"from matplotlib import pyplot as plt\n",
"import pandas as pd\n",
"import os\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.dataset import Dataset\n",
"from azureml.train.automl import AutoMLConfig"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for experiment\n",
"experiment_name = \"automl-classification-ccard-remote\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Experiment Name\"] = experiment.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create or Attach existing AmlCompute\n",
"A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"cpu_cluster_name = \"cpu-cluster-1\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load Data\n",
"\n",
"Load the credit card dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
"training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
"label_column_name = \"Class\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|classification or regression|\n",
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
"|**enable_early_stopping**|Stop the run if the metric score is not showing improvement.|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**training_data**|Input dataset, containing both features and label column.|\n",
"|**label_column_name**|The name of the label column.|\n",
"\n",
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_settings = {\n",
" \"n_cross_validations\": 3,\n",
" \"primary_metric\": \"average_precision_score_weighted\",\n",
" \"enable_early_stopping\": True,\n",
" \"max_concurrent_iterations\": 2, # This is a limit for testing purpose, please increase it as per cluster size\n",
" \"experiment_timeout_hours\": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible\n",
" \"verbosity\": logging.INFO,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(\n",
" task=\"classification\",\n",
" debug_log=\"automl_errors.log\",\n",
" compute_target=compute_target,\n",
" training_data=training_data,\n",
" label_column_name=label_column_name,\n",
" **automl_settings,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run = experiment.submit(automl_config, show_output=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# If you need to retrieve a run that already started, use the following code\n",
"# from azureml.train.automl.run import AutoMLRun\n",
"# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Widget for Monitoring Runs\n",
"\n",
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
"\n",
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"widget-rundetails-sample"
]
},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"\n",
"RunDetails(remote_run).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Explain model\n",
"\n",
"Automated ML models can be explained and visualized using the SDK Explainability library. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Analyze results\n",
"\n",
"### Retrieve the Best Model\n",
"\n",
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = remote_run.get_output()\n",
"fitted_model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Print the properties of the model\n",
"The fitted_model is a python object and you can read the different properties of the object.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the fitted model\n",
"\n",
"Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# convert the test data to dataframe\n",
"X_test_df = validation_data.drop_columns(\n",
" columns=[label_column_name]\n",
").to_pandas_dataframe()\n",
"y_test_df = validation_data.keep_columns(\n",
" columns=[label_column_name], validate=True\n",
").to_pandas_dataframe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# call the predict functions on the model\n",
"y_pred = fitted_model.predict(X_test_df)\n",
"y_pred"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Calculate metrics for the prediction\n",
"\n",
"Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values \n",
"from the trained model that was returned."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.metrics import confusion_matrix\n",
"import numpy as np\n",
"import itertools\n",
"\n",
"cf = confusion_matrix(y_test_df.values, y_pred)\n",
"plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
"plt.colorbar()\n",
"plt.title(\"Confusion Matrix\")\n",
"plt.xlabel(\"Predicted\")\n",
"plt.ylabel(\"Actual\")\n",
"class_labels = [\"False\", \"True\"]\n",
"tick_marks = np.arange(len(class_labels))\n",
"plt.xticks(tick_marks, class_labels)\n",
"plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"False\", \"True\", \"\"])\n",
"# plotting text value inside cells\n",
"thresh = cf.max() / 2.0\n",
"for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
" plt.text(\n",
" j,\n",
" i,\n",
" format(cf[i, j], \"d\"),\n",
" horizontalalignment=\"center\",\n",
" color=\"white\" if cf[i, j] > thresh else \"black\",\n",
" )\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Acknowledgements"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud\n",
"\n",
"The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection.\n",
"More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\n",
"\n",
"Please cite the following works:\n",
"\n",
"Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n",
"\n",
"Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon\n",
"\n",
"Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\n",
"\n",
"Dal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\n",
"\n",
"Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aël; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\n",
"\n",
"Carcillo, Fabrizio; Le Borgne, Yann-Aël; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing\n",
"\n",
"Bertrand Lebichot, Yann-Aël Le Borgne, Liyun He, Frederic Oblé, Gianluca Bontempi Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection, INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\n",
"\n",
"Fabrizio Carcillo, Yann-Aël Le Borgne, Olivier Caelen, Frederic Oblé, Gianluca Bontempi Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection Information Sciences, 2019"
]
}
],
"metadata": {
"authors": [
{
"name": "ratanase"
}
],
"category": "tutorial",
"compute": [
"AML Compute"
],
"datasets": [
"Creditcard"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"file_extension": ".py",
"framework": [
"None"
],
"friendly_name": "Classification of credit card fraudulent transactions using Automated ML",
"index_order": 5,
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
},
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"tags": [
"remote_run",
"AutomatedML"
],
"task": "Classification",
"version": "3.6.7"
},
"nbformat": 4,
"nbformat_minor": 2
}
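
The notebook's last code cell plots the confusion matrix only; the same predictions also support scalar metrics. A short sketch, assuming scikit-learn (already a dependency of the AutoML environment) and the y_test_df/y_pred variables defined above:

from sklearn.metrics import classification_report

# hold-out labels and predictions from the notebook's test cells
print(classification_report(y_test_df.values, y_pred, target_names=["False", "True"]))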

View File

@@ -4,52 +4,65 @@ from azureml.train.estimator import Estimator
from azureml.core.run import Run
def run_inference(test_experiment, compute_target, script_folder, train_run,
test_dataset, target_column_name, model_name):
def run_inference(
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
target_column_name,
model_name,
):
inference_env = train_run.get_environment()
est = Estimator(source_directory=script_folder,
entry_script='infer.py',
script_params={
'--target_column_name': target_column_name,
'--model_name': model_name
},
inputs=[
test_dataset.as_named_input('test_data')
],
compute_target=compute_target,
environment_definition=inference_env)
est = Estimator(
source_directory=script_folder,
entry_script="infer.py",
script_params={
"--target_column_name": target_column_name,
"--model_name": model_name,
},
inputs=[test_dataset.as_named_input("test_data")],
compute_target=compute_target,
environment_definition=inference_env,
)
run = test_experiment.submit(
est, tags={
'training_run_id': train_run.id,
'run_algorithm': train_run.properties['run_algorithm'],
'valid_score': train_run.properties['score'],
'primary_metric': train_run.properties['primary_metric']
})
est,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags['run_algorithm'])
run.log("run_algorithm", run.tags["run_algorithm"])
return run
def get_result_df(remote_run):
children = list(remote_run.get_children(recursive=True))
summary_df = pd.DataFrame(index=['run_id', 'run_algorithm',
'primary_metric', 'Score'])
summary_df = pd.DataFrame(
index=["run_id", "run_algorithm", "primary_metric", "Score"]
)
goal_minimize = False
for run in children:
if('run_algorithm' in run.properties and 'score' in run.properties):
summary_df[run.id] = [run.id, run.properties['run_algorithm'],
run.properties['primary_metric'],
float(run.properties['score'])]
if('goal' in run.properties):
goal_minimize = run.properties['goal'].split('_')[-1] == 'min'
if "run_algorithm" in run.properties and "score" in run.properties:
summary_df[run.id] = [
run.id,
run.properties["run_algorithm"],
run.properties["primary_metric"],
float(run.properties["score"]),
]
if "goal" in run.properties:
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
summary_df = summary_df.T.sort_values(
'Score',
ascending=goal_minimize).drop_duplicates(['run_algorithm'])
summary_df = summary_df.set_index('run_algorithm')
"Score", ascending=goal_minimize
).drop_duplicates(["run_algorithm"])
summary_df = summary_df.set_index("run_algorithm")
return summary_df
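
For context, a hedged usage sketch of these helpers; ws, compute_target, remote_run, and test_dataset are assumed to be set up as in the accompanying notebook, and the column/model names are illustrative:

from azureml.core import Experiment

summary = get_result_df(remote_run)  # one row per algorithm, sorted by score
best_run, _ = remote_run.get_output()

test_experiment = Experiment(ws, "automl-test")  # hypothetical experiment name
inference_run = run_inference(
    test_experiment,
    compute_target,
    ".",  # script folder containing infer.py
    best_run,
    test_dataset,
    "Class",  # hypothetical target column
    "my_registered_model",  # hypothetical registered model name
)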

View File

@@ -12,19 +12,22 @@ from azureml.core.model import Model
parser = argparse.ArgumentParser()
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
'--model_name', type=str, dest='model_name',
help='Name of registered model')
"--model_name", type=str, dest="model_name", help="Name of registered model"
)
args = parser.parse_args()
target_column_name = args.target_column_name
model_name = args.model_name
print('args passed are: ')
print('Target column name: ', target_column_name)
print('Name of registered model: ', model_name)
print("args passed are: ")
print("Target column name: ", target_column_name)
print("Name of registered model: ", model_name)
model_path = Model.get_model_path(model_name)
# deserialize the model file back into a sklearn model
@@ -32,13 +35,16 @@ model = joblib.load(model_path)
run = Run.get_context()
# get input dataset by name
test_dataset = run.input_datasets['test_data']
test_dataset = run.input_datasets["test_data"]
X_test_df = test_dataset.drop_columns(columns=[target_column_name]) \
.to_pandas_dataframe()
y_test_df = test_dataset.with_timestamp_columns(None) \
.keep_columns(columns=[target_column_name]) \
.to_pandas_dataframe()
X_test_df = test_dataset.drop_columns(
columns=[target_column_name]
).to_pandas_dataframe()
y_test_df = (
test_dataset.with_timestamp_columns(None)
.keep_columns(columns=[target_column_name])
.to_pandas_dataframe()
)
predicted = model.predict_proba(X_test_df)
@@ -47,11 +53,13 @@ if isinstance(predicted, pd.DataFrame):
# Use the AutoML scoring module
train_labels = model.classes_
class_labels = np.unique(np.concatenate((y_test_df.values, np.reshape(train_labels, (-1, 1)))))
class_labels = np.unique(
np.concatenate((y_test_df.values, np.reshape(train_labels, (-1, 1))))
)
classification_metrics = list(constants.CLASSIFICATION_SCALAR_SET)
scores = scoring.score_classification(y_test_df.values, predicted,
classification_metrics,
class_labels, train_labels)
scores = scoring.score_classification(
y_test_df.values, predicted, classification_metrics, class_labels, train_labels
)
print("scores:")
print(scores)

View File

@@ -25,9 +25,11 @@ datasets = [(Dataset.Scenario.TRAINING, train_ds)]
# Register model with training dataset
model = Model.register(workspace=ws,
model_path=args.model_path,
model_name=args.model_name,
datasets=datasets)
model = Model.register(
workspace=ws,
model_path=args.model_path,
model_name=args.model_name,
datasets=datasets,
)
print("Registered version {0} of model {1}".format(model.version, model.name))

View File

@@ -16,26 +16,82 @@ if type(run) == _OfflineRun:
else:
ws = run.experiment.workspace
usaf_list = ['725724', '722149', '723090', '722159', '723910', '720279',
'725513', '725254', '726430', '720381', '723074', '726682',
'725486', '727883', '723177', '722075', '723086', '724053',
'725070', '722073', '726060', '725224', '725260', '724520',
'720305', '724020', '726510', '725126', '722523', '703333',
'722249', '722728', '725483', '722972', '724975', '742079',
'727468', '722193', '725624', '722030', '726380', '720309',
'722071', '720326', '725415', '724504', '725665', '725424',
'725066']
usaf_list = [
"725724",
"722149",
"723090",
"722159",
"723910",
"720279",
"725513",
"725254",
"726430",
"720381",
"723074",
"726682",
"725486",
"727883",
"723177",
"722075",
"723086",
"724053",
"725070",
"722073",
"726060",
"725224",
"725260",
"724520",
"720305",
"724020",
"726510",
"725126",
"722523",
"703333",
"722249",
"722728",
"725483",
"722972",
"724975",
"742079",
"727468",
"722193",
"725624",
"722030",
"726380",
"720309",
"722071",
"720326",
"725415",
"724504",
"725665",
"725424",
"725066",
]
def get_noaa_data(start_time, end_time):
columns = ['usaf', 'wban', 'datetime', 'latitude', 'longitude', 'elevation',
'windAngle', 'windSpeed', 'temperature', 'stationName', 'p_k']
columns = [
"usaf",
"wban",
"datetime",
"latitude",
"longitude",
"elevation",
"windAngle",
"windSpeed",
"temperature",
"stationName",
"p_k",
]
isd = NoaaIsdWeather(start_time, end_time, cols=columns)
noaa_df = isd.to_pandas_dataframe()
df_filtered = noaa_df[noaa_df["usaf"].isin(usaf_list)]
df_filtered.reset_index(drop=True)
print("Received {0} rows of training data between {1} and {2}".format(
df_filtered.shape[0], start_time, end_time))
print(
"Received {0} rows of training data between {1} and {2}".format(
df_filtered.shape[0], start_time, end_time
)
)
return df_filtered
@@ -54,11 +110,12 @@ end_time = datetime.utcnow()
try:
ds = Dataset.get_by_name(ws, args.ds_name)
end_time_last_slice = ds.data_changed_time.replace(tzinfo=None)
print("Dataset {0} last updated on {1}".format(args.ds_name,
end_time_last_slice))
print("Dataset {0} last updated on {1}".format(args.ds_name, end_time_last_slice))
except Exception:
print(traceback.format_exc())
print("Dataset with name {0} not found, registering new dataset.".format(args.ds_name))
print(
"Dataset with name {0} not found, registering new dataset.".format(args.ds_name)
)
register_dataset = True
end_time = datetime(2021, 5, 1, 0, 0)
end_time_last_slice = end_time - relativedelta(weeks=2)
@@ -66,26 +123,35 @@ except Exception:
train_df = get_noaa_data(end_time_last_slice, end_time)
if train_df.size > 0:
print("Received {0} rows of new data after {1}.".format(
train_df.shape[0], end_time_last_slice))
folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(args.ds_name, end_time.year,
end_time.month, end_time.day,
end_time.hour, end_time.minute,
end_time.second)
print(
"Received {0} rows of new data after {1}.".format(
train_df.shape[0], end_time_last_slice
)
)
folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(
args.ds_name,
end_time.year,
end_time.month,
end_time.day,
end_time.hour,
end_time.minute,
end_time.second,
)
file_path = "{0}/data.csv".format(folder_name)
# Add a new partition to the registered dataset
os.makedirs(folder_name, exist_ok=True)
train_df.to_csv(file_path, index=False)
dstor.upload_files(files=[file_path],
target_path=folder_name,
overwrite=True,
show_progress=True)
dstor.upload_files(
files=[file_path], target_path=folder_name, overwrite=True, show_progress=True
)
else:
print("No new data since {0}.".format(end_time_last_slice))
if register_dataset:
ds = Dataset.Tabular.from_delimited_files(dstor.path("{}/**/*.csv".format(
args.ds_name)), partition_format='/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv')
ds = Dataset.Tabular.from_delimited_files(
dstor.path("{}/**/*.csv".format(args.ds_name)),
partition_format="/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv",
)
ds.register(ws, name=args.ds_name)
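
Once registered, the partitioned dataset can be consumed like any other TabularDataset; partition_format exposes the folder timestamp as a partition_date column. A minimal sketch, assuming a workspace config and a hypothetical dataset name:

from azureml.core import Dataset, Workspace

ws = Workspace.from_config()
ds = Dataset.get_by_name(ws, "noaa_training_data")  # hypothetical ds_name
df = ds.to_pandas_dataframe()
print(df["partition_date"].max(), df.shape)  # most recent slice and overall size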

View File

@@ -4,7 +4,6 @@ dependencies:
# Currently Azure ML only supports 3.5.2 and later.
- pip<=19.3.1
- python>=3.5.2,<3.8
- nb_conda
- cython
- urllib3<1.24
- PyJWT < 2.0.0

View File

@@ -5,7 +5,6 @@ dependencies:
- pip<=19.3.1
- nomkl
- python>=3.5.2,<3.8
- nb_conda
- cython
- urllib3<1.24
- PyJWT < 2.0.0

View File

@@ -92,7 +92,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.32.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -91,7 +91,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.32.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

Binary file not shown (image added; size: 22 KiB).

View File

@@ -0,0 +1,167 @@
from typing import Any, Dict, Optional, List
import argparse
import json
import os
import re
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from azureml.automl.core.shared import constants
from azureml.automl.core.shared.types import GrainType
from azureml.automl.runtime.shared.score import scoring
GRAIN = "time_series_id"
BACKTEST_ITER = "backtest_iteration"
ACTUALS = "actual_level"
PREDICTIONS = "predicted_level"
ALL_GRAINS = "all_sets"
FORECASTS_FILE = "forecast.csv"
SCORES_FILE = "scores.csv"
PLOTS_FILE = "plots_fcst_vs_actual.pdf"
RE_INVALID_SYMBOLS = re.compile("[: ]")
def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
"""
Compute metrics for one data frame.
:param df: The data frame which contains actual_level and predicted_level columns.
:param metrics: The list of metric names to compute.
:return: The data frame with two columns - metric_name and metric.
"""
scores = scoring.score_regression(
y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
)
metrics_df = pd.DataFrame(list(scores.items()), columns=["metric_name", "metric"])
metrics_df.sort_values(["metric_name"], inplace=True)
metrics_df.reset_index(drop=True, inplace=True)
return metrics_df
def _format_grain_name(grain: GrainType) -> str:
"""
Convert grain name to string.
:param grain: the grain name.
:return: the string representation of the given grain.
"""
if not isinstance(grain, tuple) and not isinstance(grain, list):
return str(grain)
grain = list(map(str, grain))
return "|".join(grain)
def compute_all_metrics(
fcst_df: pd.DataFrame,
ts_id_colnames: List[str],
metric_names: Optional[List[str]] = None,
):
"""
Calculate metrics per grain.
:param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'
:param metric_names: (optional) the list of metric names to return
:param ts_id_colnames: (optional) list of grain column names
:return: the data frame with metrics computed per grain and for all grains combined
"""
if not metric_names:
metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)
if ts_id_colnames is None:
ts_id_colnames = []
metrics_list = []
if ts_id_colnames:
for grain, df in fcst_df.groupby(ts_id_colnames):
one_grain_metrics_df = _compute_metrics(df, metric_names)
one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
metrics_list.append(one_grain_metrics_df)
# overall metrics
one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
one_grain_metrics_df[GRAIN] = ALL_GRAINS
metrics_list.append(one_grain_metrics_df)
# collect into a data frame
return pd.concat(metrics_list)
def _draw_one_plot(
df: pd.DataFrame,
time_column_name: str,
grain_column_names: List[str],
pdf: PdfPages,
) -> None:
"""
Draw the single plot.
:param df: The data frame with the data to build plot.
:param time_column_name: The name of a time column.
:param grain_column_names: The name of grain columns.
:param pdf: The pdf backend used to render the plot.
"""
fig, _ = plt.subplots(figsize=(20, 10))
df = df.set_index(time_column_name)
plt.plot(df[[ACTUALS, PREDICTIONS]])
plt.xticks(rotation=45)
iteration = df[BACKTEST_ITER].iloc[0]
if grain_column_names:
grain_name = [df[grain].iloc[0] for grain in grain_column_names]
plt.title(f"Time series ID: {_format_grain_name(grain_name)} {iteration}")
plt.legend(["actual", "forecast"])
plt.close(fig)
pdf.savefig(fig)
def calculate_scores_and_build_plots(
input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
):
os.makedirs(output_dir, exist_ok=True)
grains = automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
if grains is None:
grains = []
if isinstance(grains, str):
grains = [grains]
while BACKTEST_ITER in grains:
grains.remove(BACKTEST_ITER)
dfs = []
for fle in os.listdir(input_dir):
file_path = os.path.join(input_dir, fle)
if os.path.isfile(file_path) and file_path.endswith(".csv"):
df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
for _, iteration in df_iter.groupby(BACKTEST_ITER):
dfs.append(iteration)
forecast_df = pd.concat(dfs, sort=False, ignore_index=True)
# To make sure plots are in order, sort the predictions by grain and iteration.
ts_index = grains + [BACKTEST_ITER]
forecast_df.sort_values(by=ts_index, inplace=True)
pdf = PdfPages(os.path.join(output_dir, PLOTS_FILE))
for _, one_forecast in forecast_df.groupby(ts_index):
_draw_one_plot(one_forecast, time_column_name, grains, pdf)
pdf.close()
forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)
if __name__ == "__main__":
args = {"forecasts": "--forecasts", "scores_out": "--output-dir"}
parser = argparse.ArgumentParser("Parsing input arguments.")
for argname, arg in args.items():
parser.add_argument(arg, dest=argname, required=True)
parsed_args, _ = parser.parse_known_args()
input_dir = parsed_args.forecasts
output_dir = parsed_args.scores_out
with open(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
)
) as json_file:
automl_settings = json.load(json_file)
calculate_scores_and_build_plots(input_dir, output_dir, automl_settings)
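
The script is designed to run as a pipeline step, but the entry function can also be exercised directly. A hedged local invocation (paths are illustrative; the settings keys mirror the azureml.automl constants used above):

# assumes forecast CSVs produced by the backtest runs sit in ./forecasts
automl_settings = {
    "time_column_name": "date",
    "grain_column_names": ["ts_id"],
}
calculate_scores_and_build_plots("./forecasts", "./results", automl_settings)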

View File

@@ -0,0 +1,725 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Many Models with Backtesting - Automated ML\n",
"**_Backtest many models time series forecasts with Automated Machine Learning_**\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For this notebook we are using a synthetic dataset to demonstrate the back testing in many model scenario. This allows us to check historical performance of AutoML on a historical data. To do that we step back on the backtesting period by the data set several times and split the data to train and test sets. Then these data sets are used for training and evaluation of model.<br>\n",
"\n",
"Thus, it is a quick way of evaluating AutoML as if it was in production. Here, we do not test historical performance of a particular model, for this see the [notebook](../forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb). Instead, the best model for every backtest iteration can be different since AutoML chooses the best model for a given training set.\n",
"![Backtesting](Backtesting.png)\n",
"\n",
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1.0 Set up workspace, datastore, experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003526897
}
},
"outputs": [],
"source": [
"import os\n",
"\n",
"import azureml.core\n",
"from azureml.core import Workspace, Datastore\n",
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"from pandas.tseries.frequencies import to_offset\n",
"\n",
"# Set up your workspace\n",
"ws = Workspace.from_config()\n",
"ws.get_details()\n",
"\n",
"# Set up your datastores\n",
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook is compatible with Azure ML SDK version 1.35.1 or later."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose an experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003540729
}
},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, \"automl-many-models-backtest\")\n",
"\n",
"print(\"Experiment name: \" + experiment.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2.0 Data\n",
"\n",
"#### 2.1 Data generation\n",
"For this notebook we will generate the artificial data set with two [time series IDs](https://docs.microsoft.com/en-us/python/api/azureml-automl-core/azureml.automl.core.forecasting_parameters.forecastingparameters?view=azure-ml-py). Then we will generate backtest folds and will upload it to the default BLOB storage and create a [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# simulate data: 2 grains - 700\n",
"TIME_COLNAME = \"date\"\n",
"TARGET_COLNAME = \"value\"\n",
"TIME_SERIES_ID_COLNAME = \"ts_id\"\n",
"\n",
"sample_size = 700\n",
"# Set the random seed for reproducibility of results.\n",
"np.random.seed(20)\n",
"X1 = pd.DataFrame(\n",
" {\n",
" TIME_COLNAME: pd.date_range(start=\"2018-01-01\", periods=sample_size),\n",
" TARGET_COLNAME: np.random.normal(loc=100, scale=20, size=sample_size),\n",
" TIME_SERIES_ID_COLNAME: \"ts_A\",\n",
" }\n",
")\n",
"X2 = pd.DataFrame(\n",
" {\n",
" TIME_COLNAME: pd.date_range(start=\"2018-01-01\", periods=sample_size),\n",
" TARGET_COLNAME: np.random.normal(loc=100, scale=20, size=sample_size),\n",
" TIME_SERIES_ID_COLNAME: \"ts_B\",\n",
" }\n",
")\n",
"\n",
"X = pd.concat([X1, X2], ignore_index=True, sort=False)\n",
"print(\"Simulated dataset contains {} rows \\n\".format(X.shape[0]))\n",
"X.head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we will generate 8 backtesting folds with backtesting period of 7 days and with the same forecasting horizon. We will add the column \"backtest_iteration\", which will identify the backtesting period by the last training date."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"offset_type = \"7D\"\n",
"NUMBER_OF_BACKTESTS = 8 # number of train/test sets to generate\n",
"\n",
"dfs_train = []\n",
"dfs_test = []\n",
"for ts_id, df_one in X.groupby(TIME_SERIES_ID_COLNAME):\n",
"\n",
" data_end = df_one[TIME_COLNAME].max()\n",
"\n",
" for i in range(NUMBER_OF_BACKTESTS):\n",
" train_cutoff_date = data_end - to_offset(offset_type)\n",
" df_one = df_one.copy()\n",
" df_one[\"backtest_iteration\"] = \"iteration_\" + str(train_cutoff_date)\n",
" train = df_one[df_one[TIME_COLNAME] <= train_cutoff_date]\n",
" test = df_one[\n",
" (df_one[TIME_COLNAME] > train_cutoff_date)\n",
" & (df_one[TIME_COLNAME] <= data_end)\n",
" ]\n",
" data_end = train[TIME_COLNAME].max()\n",
" dfs_train.append(train)\n",
" dfs_test.append(test)\n",
"\n",
"X_train = pd.concat(dfs_train, sort=False, ignore_index=True)\n",
"X_test = pd.concat(dfs_test, sort=False, ignore_index=True)"
]
},
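{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an illustrative sanity check of the loop above, we can list the backtest iterations together with their train and test boundaries. With the simulated daily data, which ends on 2019-12-01, the last training dates should step back in 7-day increments, from 2019-11-24 for the first fold down to 2019-10-06 for the eighth."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The last training date per backtest iteration (train side) and the\n",
"# forecast window per iteration (test side).\n",
"print(X_train.groupby(\"backtest_iteration\")[TIME_COLNAME].max())\n",
"print(X_test.groupby(\"backtest_iteration\")[TIME_COLNAME].agg([\"min\", \"max\"]))"
]
},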
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 2.2 Create the Tabular Data Set.\n",
"\n",
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
"\n",
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.\n",
"\n",
"In this next step, we will upload the data and create a TabularDataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"ds = ws.get_default_datastore()\n",
"# Upload saved data to the default data store.\n",
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n",
" X_train, target=(ds, \"data_mm\"), name=\"data_train\"\n",
")\n",
"test_data = TabularDatasetFactory.register_pandas_dataframe(\n",
" X_test, target=(ds, \"data_mm\"), name=\"data_test\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3.0 Build the training pipeline\n",
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose a compute target\n",
"\n",
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
"\n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007037308
}
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"\n",
"# Name your cluster\n",
"compute_name = \"backtest-mm\"\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print(\"Found compute target: \" + compute_name)\n",
"else:\n",
" print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" # Create the compute target\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
" # If no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
" )\n",
"\n",
" # For a more detailed view of current cluster status, use the 'status' property\n",
" print(compute_target.status.serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up training parameters\n",
"\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition. Please note, that in this case we are setting grain_column_names to be the time series ID column plus iteration, because we want to train a separate model for each time series and iteration.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>normalized_root_mean_squared_error</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **max_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **grain_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007061544
}
},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsTrainParameters,\n",
")\n",
"\n",
"partition_column_names = [TIME_SERIES_ID_COLNAME, \"backtest_iteration\"]\n",
"automl_settings = {\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
" \"iterations\": 15,\n",
" \"experiment_timeout_hours\": 0.25, # This also needs to be changed based on the dataset. For larger data set this number needs to be bigger.\n",
" \"label_column_name\": TARGET_COLNAME,\n",
" \"n_cross_validations\": 3,\n",
" \"time_column_name\": TIME_COLNAME,\n",
" \"max_horizon\": 6,\n",
" \"grain_column_names\": partition_column_names,\n",
" \"track_child_runs\": False,\n",
"}\n",
"\n",
"mm_paramters = ManyModelsTrainParameters(\n",
" automl_settings=automl_settings, partition_column_names=partition_column_names\n",
")"
]
},
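{
"cell_type": "markdown",
"metadata": {},
"source": [
"With these settings the many models step partitions the training data by time series ID and backtest iteration, so the 2 simulated series and 8 backtest folds above should yield 16 partitions and, therefore, 16 independent AutoML trainings."
]
},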
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up many models pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for training. |\n",
"| **train_data** | The file dataset to be used as input to the training run. |\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. |\n",
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
"\n",
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"\n",
"\n",
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
" experiment=experiment,\n",
" train_data=train_data,\n",
" compute_target=compute_target,\n",
" node_count=2,\n",
" process_count_per_node=2,\n",
" run_invocation_timeout=920,\n",
" train_pipeline_parameters=mm_paramters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit the pipeline to run\n",
"Next we submit our pipeline to run. The whole training pipeline takes about 20 minutes using a STANDARD_DS12_V2 VM with our current ParallelRunConfig setting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run = experiment.submit(training_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check the run status, if training_run is in completed state, continue to next section. Otherwise, check the portal for failures."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 4.0 Backtesting\n",
"Now that we selected the best AutoML model for each backtest fold, we will use these models to generate the forecasts and compare with the actuals."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up output dataset for inference data\n",
"Output of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data import OutputFileDatasetConfig\n",
"\n",
"output_inference_data_ds = OutputFileDatasetConfig(\n",
" name=\"many_models_inference_output\",\n",
" destination=(dstore, \"backtesting/inference_data/\"),\n",
").register_on_complete(name=\"backtesting_data_ds\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For many models we need to provide the ManyModelsInferenceParameters object.\n",
"\n",
"#### ManyModelsInferenceParameters arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **partition_column_names** | List of column names that identifies groups. |\n",
"| **target_column_name** | \\[Optional\\] Column name only if the inference dataset has the target. |\n",
"| **time_column_name** | Column name only if it is timeseries. |\n",
"| **many_models_run_id** | \\[Optional\\] Many models pipeline run id where models were trained. |\n",
"\n",
"#### get_many_models_batch_inference_steps arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for inference run. |\n",
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
"| **compute_target** | The compute target that runs the inference pipeline.|\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
"| **process_count_per_node** | The number of processes per node.\n",
"| **train_run_id** | \\[Optional\\] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. |\n",
"| **train_experiment_name** | \\[Optional\\] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
"| **process_count_per_node** | \\[Optional\\] The number of processes per node, by default it's 4. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsInferenceParameters,\n",
")\n",
"\n",
"mm_parameters = ManyModelsInferenceParameters(\n",
" partition_column_names=partition_column_names,\n",
" time_column_name=TIME_COLNAME,\n",
" target_column_name=TARGET_COLNAME,\n",
")\n",
"\n",
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
" experiment=experiment,\n",
" inference_data=test_data,\n",
" node_count=2,\n",
" process_count_per_node=2,\n",
" compute_target=compute_target,\n",
" run_invocation_timeout=300,\n",
" output_datastore=output_inference_data_ds,\n",
" train_run_id=training_run.id,\n",
" train_experiment_name=training_run.experiment.name,\n",
" inference_pipeline_parameters=mm_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"inference_pipeline = Pipeline(ws, steps=inference_steps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline)\n",
"inference_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5.0 Retrieve results and calculate metrics\n",
"\n",
"The pipeline returns one file with the predictions for each times series ID and outputs the result to the forecasting_output Blob container. The details of the blob container is listed in 'forecasting_output.txt' under Outputs+logs. \n",
"\n",
"The next code snippet does the following:\n",
"1. Downloads the contents of the output folder that is passed in the parallel run step \n",
"2. Reads the parallel_run_step.txt file that has the predictions as pandas dataframe \n",
"3. Saves the table in csv format and \n",
"4. Displays the top 10 rows of the predictions"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
"\n",
"forecasting_results_name = \"forecasting_results\"\n",
"forecasting_output_name = \"many_models_inference_output\"\n",
"forecast_file = get_output_from_mm_pipeline(\n",
" inference_run, forecasting_results_name, forecasting_output_name\n",
")\n",
"df = pd.read_csv(forecast_file, delimiter=\" \", header=None, parse_dates=[0])\n",
"df.columns = list(X_train.columns) + [\"predicted_level\"]\n",
"print(\n",
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
")\n",
"# Save the scv file with header to read it in the next step.\n",
"df.rename(columns={TARGET_COLNAME: \"actual_level\"}, inplace=True)\n",
"df.to_csv(os.path.join(forecasting_results_name, \"forecast.csv\"), index=False)\n",
"df.head(10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View metrics\n",
"We will read in the obtained results and run the helper script, which will generate metrics and create the plots of predicted versus actual values."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from assets.score import calculate_scores_and_build_plots\n",
"\n",
"backtesting_results = \"backtesting_mm_results\"\n",
"os.makedirs(backtesting_results, exist_ok=True)\n",
"calculate_scores_and_build_plots(\n",
" forecasting_results_name, backtesting_results, automl_settings\n",
")\n",
"pd.DataFrame({\"File\": os.listdir(backtesting_results)})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The directory contains a set of files with results:\n",
"- forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix\n",
"- scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series ids, which are marked as \"all_sets\"\n",
"- plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and, eash time series is saved as separate plot.\n",
"\n",
"For demonstration purposes we will display the table of metrics for one of the time series with ID \"ts0\". We will create the utility function, which will build the table with metrics."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_metrics_for_ts(all_metrics, ts):\n",
" \"\"\"\n",
" Get the metrics for the time series with ID ts and return it as pandas data frame.\n",
"\n",
" :param all_metrics: The table with all the metrics.\n",
" :param ts: The ID of a time series of interest.\n",
" :return: The pandas DataFrame with metrics for one time series.\n",
" \"\"\"\n",
" results_df = None\n",
" for ts_id, one_series in all_metrics.groupby(\"time_series_id\"):\n",
" if not ts_id.startswith(ts):\n",
" continue\n",
" iteration = ts_id.split(\"|\")[-1]\n",
" df = one_series[[\"metric_name\", \"metric\"]]\n",
" df.rename({\"metric\": iteration}, axis=1, inplace=True)\n",
" df.set_index(\"metric_name\", inplace=True)\n",
" if results_df is None:\n",
" results_df = df\n",
" else:\n",
" results_df = results_df.merge(\n",
" df, how=\"inner\", left_index=True, right_index=True\n",
" )\n",
" results_df.sort_index(axis=1, inplace=True)\n",
" return results_df\n",
"\n",
"\n",
"metrics_df = pd.read_csv(os.path.join(backtesting_results, \"scores.csv\"))\n",
"ts = \"ts_A\"\n",
"get_metrics_for_ts(metrics_df, ts)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Forecast vs actuals plots."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import IFrame\n",
"\n",
"IFrame(\"./backtesting_mm_results/plots_fcst_vs_actual.pdf\", width=800, height=300)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"categories": [
"how-to-use-azureml",
"automated-machine-learning"
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-backtest-many-models
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,3 @@
dependencies:
- pip:
- azureml-contrib-automl-pipeline-steps

Binary file not shown.

After

Width:  |  Height:  |  Size: 22 KiB

View File

@@ -0,0 +1,45 @@
import argparse
import os

import pandas as pd

import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru
from azureml.core import Run
from azureml.core.dataset import Dataset

# Parse the arguments.
args = {
    "step_size": "--step-size",
    "step_number": "--step-number",
    "time_column_name": "--time-column-name",
    "time_series_id_column_names": "--time-series-id-column-names",
    "out_dir": "--output-dir",
}
parser = argparse.ArgumentParser("Parsing input arguments.")
for argname, arg in args.items():
    parser.add_argument(arg, dest=argname, required=True)
parsed_args, _ = parser.parse_known_args()
step_number = int(parsed_args.step_number)
step_size = int(parsed_args.step_size)
# Create the working directory to store the temporary csv files.
working_dir = parsed_args.out_dir
os.makedirs(working_dir, exist_ok=True)
# Set the input and output.
script_run = Run.get_context()
input_dataset = script_run.input_datasets["training_data"]
X_train = input_dataset.to_pandas_dataframe()
# Split the data: on iteration i, drop the last step_size * i periods from
# the end of every series.
for i in range(step_number):
    file_name = os.path.join(working_dir, "backtest_{}.csv".format(i))
    if parsed_args.time_series_id_column_names:
        dfs = []
        for _, one_series in X_train.groupby([parsed_args.time_series_id_column_names]):
            one_series = one_series.sort_values(
                by=[parsed_args.time_column_name], inplace=False
            )
            dfs.append(one_series.iloc[: len(one_series) - step_size * i])
        pd.concat(dfs, sort=False, ignore_index=True).to_csv(file_name, index=False)
    else:
        X_train.sort_values(by=[parsed_args.time_column_name], inplace=True)
        X_train.iloc[: len(X_train) - step_size * i].to_csv(file_name, index=False)
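
# Worked example (illustrative): with --step-size 7 and --step-number 8, a
# series of 700 rows yields backtest_0.csv with all 700 rows, backtest_1.csv
# with the first 693 rows, and so on down to backtest_7.csv with the first
# 651 rows; each fold drops one more 7-period step from the end of every series.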

View File

@@ -0,0 +1,173 @@
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""The batch script needed for backtesting of models using PRS."""
import argparse
import json
import logging
import os
import pickle
import re

import pandas as pd

from azureml.core.experiment import Experiment
from azureml.core.model import Model
from azureml.core.run import Run
from azureml.automl.core.shared import constants
from azureml.automl.runtime.shared.score import scoring
from azureml.train.automl import AutoMLConfig

RE_INVALID_SYMBOLS = re.compile(r"[:\s]")

model_name = None
target_column_name = None
current_step_run = None
output_dir = None

logger = logging.getLogger(__name__)


def _get_automl_settings():
    with open(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
        )
    ) as json_file:
        return json.load(json_file)


def init():
    global model_name
    global target_column_name
    global output_dir
    global automl_settings
    global model_uid
    logger.info("Initialization of the run.")
    parser = argparse.ArgumentParser("Parsing input arguments.")
    parser.add_argument("--output-dir", dest="out", required=True)
    parser.add_argument("--model-name", dest="model", default=None)
    parser.add_argument("--model-uid", dest="model_uid", default=None)
    parsed_args, _ = parser.parse_known_args()
    model_name = parsed_args.model
    automl_settings = _get_automl_settings()
    target_column_name = automl_settings.get("label_column_name")
    output_dir = parsed_args.out
    model_uid = parsed_args.model_uid
    os.makedirs(output_dir, exist_ok=True)
    os.environ["AUTOML_IGNORE_PACKAGE_VERSION_INCOMPATIBILITIES".lower()] = "True"


def get_run():
    global current_step_run
    if current_step_run is None:
        current_step_run = Run.get_context()
    return current_step_run


def run_backtest(data_input_name: str, file_name: str, experiment: Experiment):
    """Re-train the model and return the forecast."""
    data_input = pd.read_csv(
        data_input_name,
        parse_dates=[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]],
    )
    print(data_input.head())
    if not automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES):
        # There are no grains (time series ID columns).
        data_input.sort_values(
            [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]], inplace=True
        )
        X_train = data_input.iloc[: -automl_settings["max_horizon"]]
        y_train = X_train.pop(target_column_name).values
        X_test = data_input.iloc[-automl_settings["max_horizon"] :]
        y_test = X_test.pop(target_column_name).values
    else:
        # The data contains grains.
        dfs_train = []
        dfs_test = []
        for _, one_series in data_input.groupby(
            automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
        ):
            one_series.sort_values(
                [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]], inplace=True
            )
            dfs_train.append(one_series.iloc[: -automl_settings["max_horizon"]])
            dfs_test.append(one_series.iloc[-automl_settings["max_horizon"] :])
        X_train = pd.concat(dfs_train, sort=False, ignore_index=True)
        y_train = X_train.pop(target_column_name).values
        X_test = pd.concat(dfs_test, sort=False, ignore_index=True)
        y_test = X_test.pop(target_column_name).values
    last_training_date = str(
        X_train[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]].max()
    )
    if file_name:
        # If a file name is provided, load the model and retrain it on the backtest data.
        with open(file_name, "rb") as fp:
            fitted_model = pickle.load(fp)
        fitted_model.fit(X_train, y_train)
    else:
        # Run the AutoML experiment and select the best model.
        X_train[target_column_name] = y_train
        automl_config = AutoMLConfig(training_data=X_train, **automl_settings)
        automl_run = current_step_run.submit_child(automl_config, show_output=True)
        best_run, fitted_model = automl_run.get_output()
        # As we have generated models, we need to register them for future use.
        description = "Backtest model example"
        tags = {"last_training_date": last_training_date, "experiment": experiment.name}
        if model_uid:
            tags["model_uid"] = model_uid
        automl_run.register_model(
            model_name=best_run.properties["model_name"],
            description=description,
            tags=tags,
        )
        print(f"The model {best_run.properties['model_name']} was registered.")
    # forecast() returns the predicted values and a data frame in which the
    # DUMMY_TARGET_COLUMN holds the forecast.
    _, x_pred = fitted_model.forecast(X_test)
    x_pred.reset_index(inplace=True, drop=False)
    columns = [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]]
    if automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES):
        # We know that fitted_model.grain_column_names is a list.
        columns.extend(fitted_model.grain_column_names)
    columns.append(constants.TimeSeriesInternal.DUMMY_TARGET_COLUMN)
    # Remove the featurized columns.
    x_pred = x_pred[columns]
    x_pred.rename(
        {constants.TimeSeriesInternal.DUMMY_TARGET_COLUMN: "predicted_level"},
        axis=1,
        inplace=True,
    )
    x_pred["actual_level"] = y_test
    x_pred["backtest_iteration"] = f"iteration_{last_training_date}"
    date_safe = RE_INVALID_SYMBOLS.sub("_", last_training_date)
    x_pred.to_csv(os.path.join(output_dir, f"iteration_{date_safe}.csv"), index=False)
    return x_pred


def run(input_files):
    """Run the script."""
    logger.info("Running mini batch.")
    ws = get_run().experiment.workspace
    file_name = None
    if model_name:
        models = Model.list(ws, name=model_name)
        if models:
            # Pick the latest registered version of the model.
            cloud_model = max(models, key=lambda mod: mod.version)
            logger.info(
                "Using existing model from the workspace. Model version: {}".format(
                    cloud_model.version
                )
            )
            file_name = cloud_model.download(exist_ok=True)
    forecasts = []
    logger.info("Running backtest.")
    for input_file in input_files:
        forecasts.append(run_backtest(input_file, file_name, get_run().experiment))
    return pd.concat(forecasts)
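
# Example of the per-iteration output naming (illustrative): a last training
# date of "2019-11-24 00:00:00" is sanitized by RE_INVALID_SYMBOLS to
# "2019-11-24_00_00_00", so that fold's forecast is written to
# "iteration_2019-11-24_00_00_00.csv" with the time, grain, predicted_level,
# actual_level and backtest_iteration columns.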

View File

@@ -0,0 +1,167 @@
from typing import Any, Dict, Optional, List

import argparse
import json
import os
import re

import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

from azureml.automl.core.shared import constants
from azureml.automl.core.shared.types import GrainType
from azureml.automl.runtime.shared.score import scoring

GRAIN = "time_series_id"
BACKTEST_ITER = "backtest_iteration"
ACTUALS = "actual_level"
PREDICTIONS = "predicted_level"
ALL_GRAINS = "all_sets"
FORECASTS_FILE = "forecast.csv"
SCORES_FILE = "scores.csv"
PLOTS_FILE = "plots_fcst_vs_actual.pdf"

RE_INVALID_SYMBOLS = re.compile("[: ]")


def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
    """
    Compute metrics for one data frame.

    :param df: The data frame which contains the actual_level and predicted_level columns.
    :param metrics: The list of metric names to compute.
    :return: The data frame with two columns - metric_name and metric.
    """
    scores = scoring.score_regression(
        y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
    )
    metrics_df = pd.DataFrame(list(scores.items()), columns=["metric_name", "metric"])
    metrics_df.sort_values(["metric_name"], inplace=True)
    metrics_df.reset_index(drop=True, inplace=True)
    return metrics_df


def _format_grain_name(grain: GrainType) -> str:
    """
    Convert a grain name to a string.

    :param grain: the grain name.
    :return: the string representation of the given grain.
    """
    if not isinstance(grain, tuple) and not isinstance(grain, list):
        return str(grain)
    grain = list(map(str, grain))
    return "|".join(grain)


def compute_all_metrics(
    fcst_df: pd.DataFrame,
    ts_id_colnames: List[str],
    metric_names: Optional[List[str]] = None,
):
    """
    Calculate metrics per grain.

    :param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'.
    :param ts_id_colnames: (optional) the list of grain column names.
    :param metric_names: (optional) the list of metric names to return.
    :return: The data frame with the metrics computed per grain and for the whole data set.
    """
    if not metric_names:
        metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)
    if ts_id_colnames is None:
        ts_id_colnames = []
    metrics_list = []
    if ts_id_colnames:
        for grain, df in fcst_df.groupby(ts_id_colnames):
            one_grain_metrics_df = _compute_metrics(df, metric_names)
            one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
            metrics_list.append(one_grain_metrics_df)
    # Overall metrics.
    one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
    one_grain_metrics_df[GRAIN] = ALL_GRAINS
    metrics_list.append(one_grain_metrics_df)
    # Collect into a data frame.
    return pd.concat(metrics_list)
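
# The result is a long-format frame with metric_name, metric and
# time_series_id columns; time_series_id holds the joined grain names, such as
# "ts_A|iteration_x" (illustrative), plus the "all_sets" rows computed over the
# whole data set.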


def _draw_one_plot(
    df: pd.DataFrame,
    time_column_name: str,
    grain_column_names: List[str],
    pdf: PdfPages,
) -> None:
    """
    Draw the single plot.

    :param df: The data frame with the data to build plot.
    :param time_column_name: The name of a time column.
    :param grain_column_names: The names of the grain columns.
    :param pdf: The PDF backend used to render the plot.
    """
    fig, _ = plt.subplots(figsize=(20, 10))
    df = df.set_index(time_column_name)
    plt.plot(df[[ACTUALS, PREDICTIONS]])
    plt.xticks(rotation=45)
    iteration = df[BACKTEST_ITER].iloc[0]
    if grain_column_names:
        grain_name = [df[grain].iloc[0] for grain in grain_column_names]
        plt.title(f"Time series ID: {_format_grain_name(grain_name)} {iteration}")
    plt.legend(["actual", "forecast"])
    plt.close(fig)
    pdf.savefig(fig)


def calculate_scores_and_build_plots(
    input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
):
    os.makedirs(output_dir, exist_ok=True)
    grains = automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
    time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
    if grains is None:
        grains = []
    if isinstance(grains, str):
        grains = [grains]
    while BACKTEST_ITER in grains:
        grains.remove(BACKTEST_ITER)
    dfs = []
    for fle in os.listdir(input_dir):
        file_path = os.path.join(input_dir, fle)
        if os.path.isfile(file_path) and file_path.endswith(".csv"):
            df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
            for _, iteration in df_iter.groupby(BACKTEST_ITER):
                dfs.append(iteration)
    forecast_df = pd.concat(dfs, sort=False, ignore_index=True)
    # To make sure plots are in order, sort the predictions by grain and iteration.
    ts_index = grains + [BACKTEST_ITER]
    forecast_df.sort_values(by=ts_index, inplace=True)
    pdf = PdfPages(os.path.join(output_dir, PLOTS_FILE))
    for _, one_forecast in forecast_df.groupby(ts_index):
        _draw_one_plot(one_forecast, time_column_name, grains, pdf)
    pdf.close()
    forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
    metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
    metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)


if __name__ == "__main__":
    args = {"forecasts": "--forecasts", "scores_out": "--output-dir"}
    parser = argparse.ArgumentParser("Parsing input arguments.")
    for argname, arg in args.items():
        parser.add_argument(arg, dest=argname, required=True)
    parsed_args, _ = parser.parse_known_args()
    input_dir = parsed_args.forecasts
    output_dir = parsed_args.scores_out
    with open(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
        )
    ) as json_file:
        automl_settings = json.load(json_file)
    calculate_scores_and_build_plots(input_dir, output_dir, automl_settings)

View File

@@ -0,0 +1,719 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License.\n",
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl-forecasting-function.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated MachineLearning\n",
"_**The model backtesting**_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"2. [Setup](#Setup)\n",
"3. [Data](#Data)\n",
"4. [Prepare remote compute and data.](#prepare_remote)\n",
"5. [Create the configuration for AutoML backtesting](#train)\n",
"6. [Backtest AutoML](#backtest_automl)\n",
"7. [View metrics](#Metrics)\n",
"8. [Backtest the best model](#backtest_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"Model backtesting is used to evaluate its performance on historical data. To do that we step back on the backtesting period by the data set several times and split the data to train and test sets. Then these data sets are used for training and evaluation of model.<br>\n",
"This notebook is intended to demonstrate backtesting on a single model, this is the best solution for small data sets with a few or one time series in it. For scenarios where we would like to choose the best AutoML model for every backtest iteration, please see [AutoML Forecasting Backtest Many Models Example](../forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb) notebook.\n",
"![Backtesting](Backtesting.png)\n",
"This notebook demonstrates two ways of backtesting:\n",
"- AutoML backtesting: we will train separate AutoML models for historical data\n",
"- Model backtesting: from the first run we will select the best model trained on the most recent data, retrain it on the past data and evaluate."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import numpy as np\n",
"import pandas as pd\n",
"import shutil\n",
"\n",
"import azureml.core\n",
"from azureml.core import Experiment, Model, Workspace"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook is compatible with Azure ML SDK version 1.35.1 or later."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created a <b>Workspace</b>."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"output = {}\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"SKU\"] = ws.sku\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data\n",
"For the demonstration purposes we will simulate one year of daily data. To do this we need to specify the following parameters: time column name, time series ID column names and label column name. Our intention is to forecast for two weeks ahead."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TIME_COLUMN_NAME = \"date\"\n",
"TIME_SERIES_ID_COLUMN_NAMES = \"time_series_id\"\n",
"LABEL_COLUMN_NAME = \"y\"\n",
"FORECAST_HORIZON = 14\n",
"FREQUENCY = \"D\"\n",
"\n",
"\n",
"def simulate_timeseries_data(\n",
" train_len: int,\n",
" test_len: int,\n",
" time_column_name: str,\n",
" target_column_name: str,\n",
" time_series_id_column_name: str,\n",
" time_series_number: int = 1,\n",
" freq: str = \"H\",\n",
"):\n",
" \"\"\"\n",
" Return the time series of designed length.\n",
"\n",
" :param train_len: The length of training data (one series).\n",
" :type train_len: int\n",
" :param test_len: The length of testing data (one series).\n",
" :type test_len: int\n",
" :param time_column_name: The desired name of a time column.\n",
" :type time_column_name: str\n",
" :param time_series_number: The number of time series in the data set.\n",
" :type time_series_number: int\n",
" :param freq: The frequency string representing pandas offset.\n",
" see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n",
" :type freq: str\n",
" :returns: the tuple of train and test data sets.\n",
" :rtype: tuple\n",
"\n",
" \"\"\"\n",
" data_train = [] # type: List[pd.DataFrame]\n",
" data_test = [] # type: List[pd.DataFrame]\n",
" data_length = train_len + test_len\n",
" for i in range(time_series_number):\n",
" X = pd.DataFrame(\n",
" {\n",
" time_column_name: pd.date_range(\n",
" start=\"2000-01-01\", periods=data_length, freq=freq\n",
" ),\n",
" target_column_name: np.arange(data_length).astype(float)\n",
" + np.random.rand(data_length)\n",
" + i * 5,\n",
" \"ext_predictor\": np.asarray(range(42, 42 + data_length)),\n",
" time_series_id_column_name: np.repeat(\"ts{}\".format(i), data_length),\n",
" }\n",
" )\n",
" data_train.append(X[:train_len])\n",
" data_test.append(X[train_len:])\n",
" train = pd.concat(data_train)\n",
" label_train = train.pop(target_column_name).values\n",
" test = pd.concat(data_test)\n",
" label_test = test.pop(target_column_name).values\n",
" return train, label_train, test, label_test\n",
"\n",
"\n",
"n_test_periods = FORECAST_HORIZON\n",
"n_train_periods = 365\n",
"X_train, y_train, X_test, y_test = simulate_timeseries_data(\n",
" train_len=n_train_periods,\n",
" test_len=n_test_periods,\n",
" time_column_name=TIME_COLUMN_NAME,\n",
" target_column_name=LABEL_COLUMN_NAME,\n",
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAMES,\n",
" time_series_number=2,\n",
" freq=FREQUENCY,\n",
")\n",
"X_train[LABEL_COLUMN_NAME] = y_train"
]
},
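{
"cell_type": "markdown",
"metadata": {},
"source": [
"With these parameters each of the 2 simulated series has 379 daily points (365 for training plus a 14-day test window), so X_train should contain 730 rows and X_test 28 rows."
]
},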
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's see what the training data looks like."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_train.tail()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare remote compute and data. <a id=\"prepare_remote\"></a>\n",
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace), is paired with the storage account, which contains the default data store. We will use it to upload the artificial data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"ds = ws.get_default_datastore()\n",
"# Upload saved data to the default data store.\n",
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n",
" X_train, target=(ds, \"data\"), name=\"data_backtest\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You will need to create a compute target for backtesting. In this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute), you create AmlCompute as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"amlcompute_cluster_name = \"backtest-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create the configuration for AutoML backtesting <a id=\"train\"></a>\n",
"\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>normalized_root_mean_squared_error</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **max_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **grain_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_settings = {\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
" \"iterations\": 15,\n",
" \"experiment_timeout_hours\": 1, # This also needs to be changed based on the dataset. For larger data set this number needs to be bigger.\n",
" \"label_column_name\": LABEL_COLUMN_NAME,\n",
" \"n_cross_validations\": 3,\n",
" \"time_column_name\": TIME_COLUMN_NAME,\n",
" \"max_horizon\": FORECAST_HORIZON,\n",
" \"track_child_runs\": False,\n",
" \"grain_column_names\": TIME_SERIES_ID_COLUMN_NAMES,\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Backtest AutoML <a id=\"backtest_automl\"></a>\n",
"First we set backtesting parameters: we will step back by 30 days and will make 5 such steps; for each step we will forecast for next two weeks."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The number of periods to step back on each backtest iteration.\n",
"BACKTESTING_PERIOD = 30\n",
"# The number of times we will back test the model.\n",
"NUMBER_OF_BACKTESTS = 5"
]
},
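{
"cell_type": "markdown",
"metadata": {},
"source": [
"With these settings the pipeline evaluates 5 folds: fold i drops the last 30 * i days of each series before splitting, and the last 14 days (the forecast horizon) of each shortened series are then held out for testing. The earliest fold therefore trains on data that ends 120 days before the end of the full training set."
]
},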
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To train AutoML on backtesting folds we will use the [Azure Machine Learning pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/concept-ml-pipelines). It will generate backtest folds, then train model for each of them and calculate the accuracy metrics. To run pipeline, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve (here, it is a forecasting), while a Run corresponds to a specific approach to the problem."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from uuid import uuid1\n",
"\n",
"from pipeline_helper import get_backtest_pipeline\n",
"\n",
"pipeline_exp = Experiment(ws, \"automl-backtesting\")\n",
"\n",
"# We will create the unique identifier to mark our models.\n",
"model_uid = str(uuid1())\n",
"\n",
"pipeline = get_backtest_pipeline(\n",
" experiment=pipeline_exp,\n",
" dataset=train_data,\n",
" # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.\n",
" process_per_node=2,\n",
" # The maximum number of nodes for our compute is 6.\n",
" node_count=6,\n",
" compute_target=compute_target,\n",
" automl_settings=automl_settings,\n",
" step_size=BACKTESTING_PERIOD,\n",
" step_number=NUMBER_OF_BACKTESTS,\n",
" model_uid=model_uid,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Run the pipeline and wait for results."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run = pipeline_exp.submit(pipeline)\n",
"pipeline_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"After the run is complete, we can download the results. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics_output = pipeline_run.get_pipeline_output(\"results\")\n",
"metrics_output.download(\"backtest_metrics\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View metrics<a id=\"Metrics\"></a>\n",
"To distinguish these metrics from the model backtest, which we will obtain in the next section, we will move the directory with metrics out of the backtest_metrics and will remove the parent folder. We will create the utility function for that."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def copy_scoring_directory(new_name):\n",
" scores_path = os.path.join(\"backtest_metrics\", \"azureml\")\n",
" directory_list = [os.path.join(scores_path, d) for d in os.listdir(scores_path)]\n",
" latest_file = max(directory_list, key=os.path.getctime)\n",
" print(\n",
" f\"The output directory {latest_file} was created on {pd.Timestamp(os.path.getctime(latest_file), unit='s')} GMT.\"\n",
" )\n",
" shutil.move(os.path.join(latest_file, \"results\"), new_name)\n",
" shutil.rmtree(\"backtest_metrics\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Move the directory and list its contents."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"copy_scoring_directory(\"automl_backtest\")\n",
"pd.DataFrame({\"File\": os.listdir(\"automl_backtest\")})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The directory contains a set of files with results:\n",
"- forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix\n",
"- scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series id are marked as \"all_sets\"\n",
"- plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and time series.\n",
"\n",
"For demonstration purposes we will display the table of metrics for one of the time series with ID \"ts0\". Again, we will create the utility function, which will be re used in model backtesting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_metrics_for_ts(all_metrics, ts):\n",
" \"\"\"\n",
" Get the metrics for the time series with ID ts and return it as pandas data frame.\n",
"\n",
" :param all_metrics: The table with all the metrics.\n",
" :param ts: The ID of a time series of interest.\n",
" :return: The pandas DataFrame with metrics for one time series.\n",
" \"\"\"\n",
" results_df = None\n",
" for ts_id, one_series in all_metrics.groupby(\"time_series_id\"):\n",
" if not ts_id.startswith(ts):\n",
" continue\n",
" iteration = ts_id.split(\"|\")[-1]\n",
" df = one_series[[\"metric_name\", \"metric\"]]\n",
" df.rename({\"metric\": iteration}, axis=1, inplace=True)\n",
" df.set_index(\"metric_name\", inplace=True)\n",
" if results_df is None:\n",
" results_df = df\n",
" else:\n",
" results_df = results_df.merge(\n",
" df, how=\"inner\", left_index=True, right_index=True\n",
" )\n",
" results_df.sort_index(axis=1, inplace=True)\n",
" return results_df\n",
"\n",
"\n",
"metrics_df = pd.read_csv(os.path.join(\"automl_backtest\", \"scores.csv\"))\n",
"ts_id = \"ts0\"\n",
"get_metrics_for_ts(metrics_df, ts_id)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Forecast vs actuals plots."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import IFrame\n",
"\n",
"IFrame(\"./automl_backtest/plots_fcst_vs_actual.pdf\", width=800, height=300)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# <font color='blue'>Backtest the best model</font> <a id=\"backtest_model\"></a>\n",
"\n",
"For model backtesting we will use the same parameters we used to backtest AutoML. All the models, we have obtained in the previous run were registered in our workspace. To identify the model, each was assigned a tag with the last trainig date."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_list = Model.list(ws, tags={\"experiment\": \"automl-backtesting\"})\n",
"model_data = {\"name\": [], \"last_training_date\": []}\n",
"for model in model_list:\n",
" if (\n",
" \"last_training_date\" not in model.tags\n",
" or \"model_uid\" not in model.tags\n",
" or model.tags[\"model_uid\"] != model_uid\n",
" ):\n",
" continue\n",
" model_data[\"name\"].append(model.name)\n",
" model_data[\"last_training_date\"].append(\n",
" pd.Timestamp(model.tags[\"last_training_date\"])\n",
" )\n",
"df_models = pd.DataFrame(model_data)\n",
"df_models.sort_values([\"last_training_date\"], inplace=True)\n",
"df_models.reset_index(inplace=True, drop=True)\n",
"df_models"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We will backtest the model trained on the most recet data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_name = df_models[\"name\"].iloc[-1]\n",
"model_name"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrain the models.\n",
"Assemble the pipeline, which will retrain the best model from AutoML run on historical data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_exp = Experiment(ws, \"model-backtesting\")\n",
"\n",
"pipeline = get_backtest_pipeline(\n",
" experiment=pipeline_exp,\n",
" dataset=train_data,\n",
" # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.\n",
" process_per_node=2,\n",
" # The maximum number of nodes for our compute is 6.\n",
" node_count=6,\n",
" compute_target=compute_target,\n",
" automl_settings=automl_settings,\n",
" step_size=BACKTESTING_PERIOD,\n",
" step_number=NUMBER_OF_BACKTESTS,\n",
" model_name=model_name,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Launch the backtesting pipeline."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run = pipeline_exp.submit(pipeline)\n",
"pipeline_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The metrics are stored in the pipeline output named \"score\". The next code will download the table with metrics."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics_output = pipeline_run.get_pipeline_output(\"results\")\n",
"metrics_output.download(\"backtest_metrics\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Again, we will copy the data files from the downloaded directory, but in this case we will call the folder \"model_backtest\"; it will contain the same files as the one for AutoML backtesting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"copy_scoring_directory(\"model_backtest\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we will display the metrics."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_metrics_df = pd.read_csv(os.path.join(\"model_backtest\", \"scores.csv\"))\n",
"get_metrics_for_ts(model_metrics_df, ts_id)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Forecast vs actuals plots."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import IFrame\n",
"\n",
"IFrame(\"./model_backtest/plots_fcst_vs_actual.pdf\", width=800, height=300)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"category": "tutorial",
"compute": [
"Remote"
],
"datasets": [
"None"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"Azure ML AutoML"
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-backtest-single-model
dependencies:
- pip:
  - azureml-sdk

View File

@@ -0,0 +1,166 @@
from typing import Any, Dict, Optional
import os
import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru
from azureml._restclient.jasmine_client import JasmineClient
from azureml.contrib.automl.pipeline.steps import utilities
from azureml.core import RunConfiguration
from azureml.core.compute import ComputeTarget
from azureml.core.experiment import Experiment
from azureml.data import LinkTabularOutputDatasetConfig, TabularDataset
from azureml.pipeline.core import Pipeline, PipelineData, PipelineParameter
from azureml.pipeline.steps import ParallelRunConfig, ParallelRunStep, PythonScriptStep
from azureml.train.automl.constants import Scenarios
from azureml.data.dataset_consumption_config import DatasetConsumptionConfig
PROJECT_FOLDER = "assets"
SETTINGS_FILE = "automl_settings.json"


def get_backtest_pipeline(
    experiment: Experiment,
    dataset: TabularDataset,
    process_per_node: int,
    node_count: int,
    compute_target: ComputeTarget,
    automl_settings: Dict[str, Any],
    step_size: int,
    step_number: int,
    model_name: Optional[str] = None,
    model_uid: Optional[str] = None,
) -> Pipeline:
    """
    Build the backtesting pipeline.

    :param experiment: The experiment used to run the pipeline.
    :param dataset: Tabular data set to be used for model training.
    :param process_per_node: The number of processes per node. Generally it should be the number of cores
                             on the node divided by two.
    :param node_count: The number of nodes to be used.
    :param compute_target: The compute target to be used to run the pipeline.
    :param model_name: The name of a model to be back tested.
    :param automl_settings: The dictionary with AutoML settings.
    :param step_size: The number of periods to step back in backtesting.
    :param step_number: The number of backtesting iterations.
    :param model_uid: The uid to mark models from this run of the experiment.
    :return: The pipeline to be used for model retraining.
             **Note:** The output will be uploaded in the pipeline output
             called 'results'.
    """
    jasmine_client = JasmineClient(
        service_context=experiment.workspace.service_context,
        experiment_name=experiment.name,
        experiment_id=experiment.id,
    )
    env = jasmine_client.get_curated_environment(
        scenario=Scenarios.AUTOML,
        enable_dnn=False,
        enable_gpu=False,
        compute=compute_target,
        compute_sku=experiment.workspace.compute_targets.get(
            compute_target.name
        ).vm_size,
    )
    data_results = PipelineData(
        name="results", datastore=None, pipeline_output_name="results"
    )

    ############################################################
    # Split the data set using a python script.
    ############################################################
    run_config = RunConfiguration()
    run_config.docker.use_docker = True
    run_config.environment = env

    split_data = PipelineData(name="split_data_output", datastore=None).as_dataset()
    split_step = PythonScriptStep(
        name="split_data_for_backtest",
        script_name="data_split.py",
        inputs=[dataset.as_named_input("training_data")],
        outputs=[split_data],
        source_directory=PROJECT_FOLDER,
        arguments=[
            "--step-size",
            step_size,
            "--step-number",
            step_number,
            "--time-column-name",
            automl_settings.get("time_column_name"),
            "--time-series-id-column-names",
            automl_settings.get("grain_column_names"),
            "--output-dir",
            split_data,
        ],
        runconfig=run_config,
        compute_target=compute_target,
        allow_reuse=False,
    )

    ############################################################
    # We will run the backtest in the parallel run step.
    ############################################################
    settings_path = os.path.join(PROJECT_FOLDER, SETTINGS_FILE)
    hru.dump_object_to_json(automl_settings, settings_path)
    # Process one input file from the split step per mini batch.
    mini_batch_size = PipelineParameter(name="batch_size_param", default_value=str(1))
    back_test_config = ParallelRunConfig(
        source_directory=PROJECT_FOLDER,
        entry_script="retrain_models.py",
        mini_batch_size=mini_batch_size,
        error_threshold=-1,
        output_action="append_row",
        append_row_file_name="outputs.txt",
        compute_target=compute_target,
        environment=env,
        process_count_per_node=process_per_node,
        run_invocation_timeout=3600,
        node_count=node_count,
    )
    forecasts = PipelineData(name="forecasts", datastore=None)
    if model_name:
        parallel_step_name = "{}-backtest".format(model_name.replace("_", "-"))
    else:
        parallel_step_name = "AutoML-backtest"

    prs_args = [
        "--target_column_name",
        automl_settings.get("label_column_name"),
        "--output-dir",
        forecasts,
    ]
    if model_name is not None:
        prs_args.append("--model-name")
        prs_args.append(model_name)
    if model_uid is not None:
        prs_args.append("--model-uid")
        prs_args.append(model_uid)
    backtest_prs = ParallelRunStep(
        name=parallel_step_name,
        parallel_run_config=back_test_config,
        arguments=prs_args,
        inputs=[split_data],
        output=forecasts,
        allow_reuse=False,
    )

    ############################################################
    # Then we collect the output and return it as the
    # 'results' pipeline output.
    ############################################################
    collection_step = PythonScriptStep(
        name="score",
        script_name="score.py",
        inputs=[forecasts.as_mount()],
        outputs=[data_results],
        source_directory=PROJECT_FOLDER,
        arguments=[
            "--forecasts",
            forecasts,
            "--output-dir",
            data_results,
        ],
        runconfig=run_config,
        compute_target=compute_target,
        allow_reuse=False,
    )
    # Build and return the pipeline.
    return Pipeline(
        workspace=experiment.workspace,
        steps=[split_step, backtest_prs, collection_step],
    )

View File

@@ -113,7 +113,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.32.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -139,18 +139,18 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'beer-remote-cpu'\n",
"experiment_name = \"beer-remote-cpu\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -185,10 +185,11 @@
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
" max_nodes=4)\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -245,17 +246,21 @@
"plt.tight_layout()\n",
"\n",
"plt.subplot(2, 1, 1)\n",
"plt.title('Beer Production By Year')\n",
"df = pd.read_csv(\"Beer_no_valid_split_train.csv\", parse_dates=True, index_col= 'DATE').drop(columns='grain')\n",
"test_df = pd.read_csv(\"Beer_no_valid_split_test.csv\", parse_dates=True, index_col= 'DATE').drop(columns='grain')\n",
"plt.title(\"Beer Production By Year\")\n",
"df = pd.read_csv(\n",
" \"Beer_no_valid_split_train.csv\", parse_dates=True, index_col=\"DATE\"\n",
").drop(columns=\"grain\")\n",
"test_df = pd.read_csv(\n",
" \"Beer_no_valid_split_test.csv\", parse_dates=True, index_col=\"DATE\"\n",
").drop(columns=\"grain\")\n",
"plt.plot(df)\n",
"\n",
"plt.subplot(2, 1, 2)\n",
"plt.title('Beer Production By Month')\n",
"plt.title(\"Beer Production By Month\")\n",
"groups = df.groupby(df.index.month)\n",
"months = concat([DataFrame(x[1].values) for x in groups], axis=1)\n",
"months = DataFrame(months)\n",
"months.columns = range(1,13)\n",
"months.columns = range(1, 13)\n",
"months.boxplot()\n",
"\n",
"plt.show()"
@@ -270,10 +275,10 @@
},
"outputs": [],
"source": [
"target_column_name = 'BeerProduction'\n",
"time_column_name = 'DATE'\n",
"target_column_name = \"BeerProduction\"\n",
"time_column_name = \"DATE\"\n",
"time_series_id_column_names = []\n",
"freq = 'M' #Monthly data"
"freq = \"M\" # Monthly data"
]
},
{
@@ -301,14 +306,36 @@
"test_df.to_csv(\"test.csv\")\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"datastore.upload_files(files = ['./train.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
"datastore.upload_files(files = ['./valid.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
"datastore.upload_files(files = ['./test.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
"datastore.upload_files(\n",
" files=[\"./train.csv\"],\n",
" target_path=\"beer-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./valid.csv\"],\n",
" target_path=\"beer-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./test.csv\"],\n",
" target_path=\"beer-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"\n",
"from azureml.core import Dataset\n",
"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/train.csv')])\n",
"valid_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/valid.csv')])\n",
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])"
"\n",
"train_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"beer-dataset/tabular/train.csv\")]\n",
")\n",
"valid_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"beer-dataset/tabular/valid.csv\")]\n",
")\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"beer-dataset/tabular/test.csv\")]\n",
")"
]
},
{
@@ -366,24 +393,29 @@
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n",
" freq='MS' # Set the forecast frequency to be monthly (start of the month)\n",
" freq=\"MS\", # Set the forecast frequency to be monthly (start of the month)\n",
")\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting', \n",
" primary_metric='normalized_root_mean_squared_error',\n",
" experiment_timeout_hours = 1,\n",
" training_data=train_dataset,\n",
" label_column_name=target_column_name,\n",
" validation_data=valid_dataset, \n",
" verbosity=logging.INFO,\n",
" compute_target=compute_target,\n",
" max_concurrent_iterations=4,\n",
" max_cores_per_iteration=-1,\n",
" enable_dnn=True,\n",
" forecasting_parameters=forecasting_parameters)"
"# We will disable the enable_early_stopping flag to ensure the DNN model is recommended for demonstration purpose.\n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" experiment_timeout_hours=1,\n",
" training_data=train_dataset,\n",
" label_column_name=target_column_name,\n",
" validation_data=valid_dataset,\n",
" verbosity=logging.INFO,\n",
" compute_target=compute_target,\n",
" max_concurrent_iterations=4,\n",
" max_cores_per_iteration=-1,\n",
" enable_dnn=True,\n",
" enable_early_stopping=False,\n",
" forecasting_parameters=forecasting_parameters,\n",
")"
]
},
{
@@ -405,7 +437,7 @@
},
"outputs": [],
"source": [
"remote_run = experiment.submit(automl_config, show_output= True)"
"remote_run = experiment.submit(automl_config, show_output=True)"
]
},
{
@@ -453,6 +485,7 @@
"outputs": [],
"source": [
"from helper import get_result_df\n",
"\n",
"summary_df = get_result_df(remote_run)\n",
"summary_df"
]
@@ -468,11 +501,12 @@
"source": [
"from azureml.core.run import Run\n",
"from azureml.widgets import RunDetails\n",
"forecast_model = 'TCNForecaster'\n",
"if not forecast_model in summary_df['run_id']:\n",
" forecast_model = 'ForecastTCN'\n",
" \n",
"best_dnn_run_id = summary_df['run_id'][forecast_model]\n",
"\n",
"forecast_model = \"TCNForecaster\"\n",
"if not forecast_model in summary_df[\"run_id\"]:\n",
" forecast_model = \"ForecastTCN\"\n",
"\n",
"best_dnn_run_id = summary_df[\"run_id\"][forecast_model]\n",
"best_dnn_run = Run(experiment, best_dnn_run_id)"
]
},
@@ -486,7 +520,7 @@
"outputs": [],
"source": [
"best_dnn_run.parent\n",
"RunDetails(best_dnn_run.parent).show() "
"RunDetails(best_dnn_run.parent).show()"
]
},
{
@@ -499,7 +533,7 @@
"outputs": [],
"source": [
"best_dnn_run\n",
"RunDetails(best_dnn_run).show() "
"RunDetails(best_dnn_run).show()"
]
},
{
@@ -534,7 +568,10 @@
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])\n",
"\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"beer-dataset/tabular/test.csv\")]\n",
")\n",
"# preview the first 3 rows of the dataset\n",
"test_dataset.take(5).to_pandas_dataframe()"
]
@@ -545,7 +582,7 @@
"metadata": {},
"outputs": [],
"source": [
"compute_target = ws.compute_targets['beer-cluster']\n",
"compute_target = ws.compute_targets[\"beer-cluster\"]\n",
"test_experiment = Experiment(ws, experiment_name + \"_test\")"
]
},
@@ -561,9 +598,9 @@
"import os\n",
"import shutil\n",
"\n",
"script_folder = os.path.join(os.getcwd(), 'inference')\n",
"script_folder = os.path.join(os.getcwd(), \"inference\")\n",
"os.makedirs(script_folder, exist_ok=True)\n",
"shutil.copy('infer.py', script_folder)"
"shutil.copy(\"infer.py\", script_folder)"
]
},
{
@@ -574,8 +611,18 @@
"source": [
"from helper import run_inference\n",
"\n",
"test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run, test_dataset, valid_dataset, forecast_horizon,\n",
" target_column_name, time_column_name, freq)"
"test_run = run_inference(\n",
" test_experiment,\n",
" compute_target,\n",
" script_folder,\n",
" best_dnn_run,\n",
" test_dataset,\n",
" valid_dataset,\n",
" forecast_horizon,\n",
" target_column_name,\n",
" time_column_name,\n",
" freq,\n",
")"
]
},
{
@@ -595,8 +642,19 @@
"source": [
"from helper import run_multiple_inferences\n",
"\n",
"summary_df = run_multiple_inferences(summary_df, experiment, test_experiment, compute_target, script_folder, test_dataset, \n",
" valid_dataset, forecast_horizon, target_column_name, time_column_name, freq)"
"summary_df = run_multiple_inferences(\n",
" summary_df,\n",
" experiment,\n",
" test_experiment,\n",
" compute_target,\n",
" script_folder,\n",
" test_dataset,\n",
" valid_dataset,\n",
" forecast_horizon,\n",
" target_column_name,\n",
" time_column_name,\n",
" freq,\n",
")"
]
},
{
@@ -616,7 +674,7 @@
" test_run = Run(test_experiment, test_run_id)\n",
" test_run.wait_for_completion()\n",
" test_score = test_run.get_metrics()[run_summary.primary_metric]\n",
" summary_df.loc[summary_df.run_id == run_id, 'Test Score'] = test_score\n",
" summary_df.loc[summary_df.run_id == run_id, \"Test Score\"] = test_score\n",
" print(\"Test Score: \", test_score)"
]
},

View File

@@ -6,120 +6,158 @@ from azureml.core.run import Run
from azureml.automl.core.shared import constants
def split_fraction_by_grain(df, fraction, time_column_name,
grain_column_names=None):
def split_fraction_by_grain(df, fraction, time_column_name, grain_column_names=None):
if not grain_column_names:
df['tmp_grain_column'] = 'grain'
grain_column_names = ['tmp_grain_column']
df["tmp_grain_column"] = "grain"
grain_column_names = ["tmp_grain_column"]
"""Group df by grain and split on last n rows for each group."""
df_grouped = (df.sort_values(time_column_name)
.groupby(grain_column_names, group_keys=False))
df_grouped = df.sort_values(time_column_name).groupby(
grain_column_names, group_keys=False
)
df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-int(len(dfg) *
fraction)] if fraction > 0 else dfg)
df_head = df_grouped.apply(
lambda dfg: dfg.iloc[: -int(len(dfg) * fraction)] if fraction > 0 else dfg
)
df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-int(len(dfg) *
fraction):] if fraction > 0 else dfg[:0])
df_tail = df_grouped.apply(
lambda dfg: dfg.iloc[-int(len(dfg) * fraction) :] if fraction > 0 else dfg[:0]
)
if 'tmp_grain_column' in grain_column_names:
if "tmp_grain_column" in grain_column_names:
for df2 in (df, df_head, df_tail):
df2.drop('tmp_grain_column', axis=1, inplace=True)
df2.drop("tmp_grain_column", axis=1, inplace=True)
grain_column_names.remove('tmp_grain_column')
grain_column_names.remove("tmp_grain_column")
return df_head, df_tail
def split_full_for_forecasting(df, time_column_name,
grain_column_names=None, test_split=0.2):
def split_full_for_forecasting(
df, time_column_name, grain_column_names=None, test_split=0.2
):
index_name = df.index.name
# Assumes that there isn't already a column called tmpindex
df['tmpindex'] = df.index
df["tmpindex"] = df.index
train_df, test_df = split_fraction_by_grain(
df, test_split, time_column_name, grain_column_names)
df, test_split, time_column_name, grain_column_names
)
train_df = train_df.set_index('tmpindex')
train_df = train_df.set_index("tmpindex")
train_df.index.name = index_name
test_df = test_df.set_index('tmpindex')
test_df = test_df.set_index("tmpindex")
test_df.index.name = index_name
df.drop('tmpindex', axis=1, inplace=True)
df.drop("tmpindex", axis=1, inplace=True)
return train_df, test_df
def get_result_df(remote_run):
children = list(remote_run.get_children(recursive=True))
summary_df = pd.DataFrame(index=['run_id', 'run_algorithm',
'primary_metric', 'Score'])
summary_df = pd.DataFrame(
index=["run_id", "run_algorithm", "primary_metric", "Score"]
)
goal_minimize = False
for run in children:
if run.get_status().lower() == constants.RunState.COMPLETE_RUN \
and 'run_algorithm' in run.properties and 'score' in run.properties:
if (
run.get_status().lower() == constants.RunState.COMPLETE_RUN
and "run_algorithm" in run.properties
and "score" in run.properties
):
# We only count in the completed child runs.
summary_df[run.id] = [run.id, run.properties['run_algorithm'],
run.properties['primary_metric'],
float(run.properties['score'])]
if ('goal' in run.properties):
goal_minimize = run.properties['goal'].split('_')[-1] == 'min'
summary_df[run.id] = [
run.id,
run.properties["run_algorithm"],
run.properties["primary_metric"],
float(run.properties["score"]),
]
if "goal" in run.properties:
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
summary_df = summary_df.T.sort_values(
'Score',
ascending=goal_minimize).drop_duplicates(['run_algorithm'])
summary_df = summary_df.set_index('run_algorithm')
"Score", ascending=goal_minimize
).drop_duplicates(["run_algorithm"])
summary_df = summary_df.set_index("run_algorithm")
return summary_df
def run_inference(test_experiment, compute_target, script_folder, train_run,
test_dataset, lookback_dataset, max_horizon,
target_column_name, time_column_name, freq):
model_base_name = 'model.pkl'
if 'model_data_location' in train_run.properties:
model_location = train_run.properties['model_data_location']
_, model_base_name = model_location.rsplit('/', 1)
train_run.download_file('outputs/{}'.format(model_base_name), 'inference/{}'.format(model_base_name))
train_run.download_file('outputs/conda_env_v_1_0_0.yml', 'inference/condafile.yml')
def run_inference(
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
):
model_base_name = "model.pkl"
if "model_data_location" in train_run.properties:
model_location = train_run.properties["model_data_location"]
_, model_base_name = model_location.rsplit("/", 1)
train_run.download_file(
"outputs/{}".format(model_base_name), "inference/{}".format(model_base_name)
)
train_run.download_file("outputs/conda_env_v_1_0_0.yml", "inference/condafile.yml")
inference_env = Environment("myenv")
inference_env.docker.enabled = True
inference_env.python.conda_dependencies = CondaDependencies(
conda_dependencies_file_path='inference/condafile.yml')
conda_dependencies_file_path="inference/condafile.yml"
)
est = Estimator(source_directory=script_folder,
entry_script='infer.py',
script_params={
'--max_horizon': max_horizon,
'--target_column_name': target_column_name,
'--time_column_name': time_column_name,
'--frequency': freq,
'--model_path': model_base_name
},
inputs=[test_dataset.as_named_input('test_data'),
lookback_dataset.as_named_input('lookback_data')],
compute_target=compute_target,
environment_definition=inference_env)
est = Estimator(
source_directory=script_folder,
entry_script="infer.py",
script_params={
"--max_horizon": max_horizon,
"--target_column_name": target_column_name,
"--time_column_name": time_column_name,
"--frequency": freq,
"--model_path": model_base_name,
},
inputs=[
test_dataset.as_named_input("test_data"),
lookback_dataset.as_named_input("lookback_data"),
],
compute_target=compute_target,
environment_definition=inference_env,
)
run = test_experiment.submit(
est, tags={
'training_run_id': train_run.id,
'run_algorithm': train_run.properties['run_algorithm'],
'valid_score': train_run.properties['score'],
'primary_metric': train_run.properties['primary_metric']
})
est,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags['run_algorithm'])
run.log("run_algorithm", run.tags["run_algorithm"])
return run
def run_multiple_inferences(summary_df, train_experiment, test_experiment,
compute_target, script_folder, test_dataset,
lookback_dataset, max_horizon, target_column_name,
time_column_name, freq):
def run_multiple_inferences(
summary_df,
train_experiment,
test_experiment,
compute_target,
script_folder,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
):
for run_name, run_summary in summary_df.iterrows():
print(run_name)
print(run_summary)
@@ -127,12 +165,19 @@ def run_multiple_inferences(summary_df, train_experiment, test_experiment,
train_run = Run(train_experiment, run_id)
test_run = run_inference(
test_experiment, compute_target, script_folder, train_run,
test_dataset, lookback_dataset, max_horizon, target_column_name,
time_column_name, freq)
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
)
print(test_run)
summary_df.loc[summary_df.run_id == run_id,
'test_run_id'] = test_run.id
summary_df.loc[summary_df.run_id == run_id, "test_run_id"] = test_run.id
return summary_df

View File

@@ -19,9 +19,14 @@ except ImportError:
_torch_present = False
def align_outputs(y_predicted, X_trans, X_test, y_test,
predicted_column_name='predicted',
horizon_colname='horizon_origin'):
def align_outputs(
y_predicted,
X_trans,
X_test,
y_test,
predicted_column_name="predicted",
horizon_colname="horizon_origin",
):
"""
Demonstrates how to get the output aligned to the inputs
using pandas indexes. Helps understand what happened if
@@ -33,9 +38,13 @@ def align_outputs(y_predicted, X_trans, X_test, y_test,
* model was asked to predict past max_horizon -> increase max horizon
* data at start of X_test was needed for lags -> provide previous periods
"""
if (horizon_colname in X_trans):
df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname]})
if horizon_colname in X_trans:
df_fcst = pd.DataFrame(
{
predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname],
}
)
else:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
@@ -48,20 +57,21 @@ def align_outputs(y_predicted, X_trans, X_test, y_test,
# X_test_full's index does not include origin, so reset for merge
df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns='index')
together = df_fcst.merge(X_test_full, how='right')
X_test_full = X_test_full.reset_index().drop(columns="index")
together = df_fcst.merge(X_test_full, how="right")
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = together[together[[target_column_name,
predicted_column_name]].notnull().all(axis=1)]
return (clean)
clean = together[
together[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
return clean
def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
max_horizon, X_lookback, y_lookback,
freq='D'):
def do_rolling_forecast_with_lookback(
fitted_model, X_test, y_test, max_horizon, X_lookback, y_lookback, freq="D"
):
"""
Produce forecasts on a rolling origin over the given test set.
@@ -72,7 +82,7 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
origin time for constructing lag features.
This function returns a concatenated DataFrame of rolling forecasts.
"""
"""
print("Using lookback of size: ", y_lookback.size)
df_list = []
origin_time = X_test[time_column_name].min()
@@ -83,22 +93,28 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up-to the horizon
expand_wind = (X[time_column_name] < horizon_time)
expand_wind = X[time_column_name] < horizon_time
X_test_expand = X[expand_wind]
y_query_expand = np.zeros(len(X_test_expand)).astype(float)
y_query_expand.fill(np.nan)
if origin_time != X[time_column_name].min():
# Set the context by including actuals up-to the origin time
test_context_expand_wind = (X[time_column_name] < origin_time)
context_expand_wind = (X_test_expand[time_column_name] < origin_time)
test_context_expand_wind = X[time_column_name] < origin_time
context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y[test_context_expand_wind]
# Print some debug info
print("Horizon_time:", horizon_time,
" origin_time: ", origin_time,
" max_horizon: ", max_horizon,
" freq: ", freq)
print(
"Horizon_time:",
horizon_time,
" origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
@@ -124,9 +140,14 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
df_list.append(align_outputs(
y_fcst[trans_roll_wind], X_trans[trans_roll_wind],
X[test_roll_wind], y[test_roll_wind]))
df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X[test_roll_wind],
y[test_roll_wind],
)
)
# Advance the origin time
origin_time = horizon_time
@@ -134,7 +155,7 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
return pd.concat(df_list, ignore_index=True)
def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq="D"):
"""
Produce forecasts on a rolling origin over the given test set.
@@ -145,7 +166,7 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
origin time for constructing lag features.
This function returns a concatenated DataFrame of rolling forecasts.
"""
"""
df_list = []
origin_time = X_test[time_column_name].min()
while origin_time <= X_test[time_column_name].max():
@@ -153,23 +174,28 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up-to the horizon
expand_wind = (X_test[time_column_name] < horizon_time)
expand_wind = X_test[time_column_name] < horizon_time
X_test_expand = X_test[expand_wind]
y_query_expand = np.zeros(len(X_test_expand)).astype(float)
y_query_expand.fill(np.nan)
if origin_time != X_test[time_column_name].min():
# Set the context by including actuals up-to the origin time
test_context_expand_wind = (X_test[time_column_name] < origin_time)
context_expand_wind = (X_test_expand[time_column_name] < origin_time)
y_query_expand[context_expand_wind] = y_test[
test_context_expand_wind]
test_context_expand_wind = X_test[time_column_name] < origin_time
context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y_test[test_context_expand_wind]
# Print some debug info
print("Horizon_time:", horizon_time,
" origin_time: ", origin_time,
" max_horizon: ", max_horizon,
" freq: ", freq)
print(
"Horizon_time:",
horizon_time,
" origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
@@ -193,10 +219,14 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X_test[time_column_name] >= origin_time)
df_list.append(align_outputs(y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X_test[test_roll_wind],
y_test[test_roll_wind]))
df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X_test[test_roll_wind],
y_test[test_roll_wind],
)
)
# Advance the origin time
origin_time = horizon_time
@@ -230,20 +260,31 @@ def map_location_cuda(storage, loc):
parser = argparse.ArgumentParser()
parser.add_argument(
'--max_horizon', type=int, dest='max_horizon',
default=10, help='Max Horizon for forecasting')
"--max_horizon",
type=int,
dest="max_horizon",
default=10,
help="Max Horizon for forecasting",
)
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
'--time_column_name', type=str, dest='time_column_name',
help='Time Column Name')
"--time_column_name", type=str, dest="time_column_name", help="Time Column Name"
)
parser.add_argument(
'--frequency', type=str, dest='freq',
help='Frequency of prediction')
"--frequency", type=str, dest="freq", help="Frequency of prediction"
)
parser.add_argument(
'--model_path', type=str, dest='model_path',
default='model.pkl', help='Filename of model to be loaded')
"--model_path",
type=str,
dest="model_path",
default="model.pkl",
help="Filename of model to be loaded",
)
args = parser.parse_args()
max_horizon = args.max_horizon
@@ -252,7 +293,7 @@ time_column_name = args.time_column_name
freq = args.freq
model_path = args.model_path
print('args passed are: ')
print("args passed are: ")
print(max_horizon)
print(target_column_name)
print(time_column_name)
@@ -261,39 +302,41 @@ print(model_path)
run = Run.get_context()
# get input dataset by name
test_dataset = run.input_datasets['test_data']
lookback_dataset = run.input_datasets['lookback_data']
test_dataset = run.input_datasets["test_data"]
lookback_dataset = run.input_datasets["lookback_data"]
grain_column_names = []
df = test_dataset.to_pandas_dataframe()
print('Read df')
print("Read df")
print(df)
X_test_df = test_dataset.drop_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns(
None).keep_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(
columns=[target_column_name]
)
X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(
None).keep_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(None).keep_columns(
columns=[target_column_name]
)
_, ext = os.path.splitext(model_path)
if ext == '.pt':
if ext == ".pt":
# Load the fc-tcn torch model.
assert _torch_present
if torch.cuda.is_available():
map_location = map_location_cuda
else:
map_location = 'cpu'
with open(model_path, 'rb') as fh:
map_location = "cpu"
with open(model_path, "rb") as fh:
fitted_model = torch.load(fh, map_location=map_location)
else:
# Load the sklearn pipeline.
fitted_model = joblib.load(model_path)
if hasattr(fitted_model, 'get_lookback'):
if hasattr(fitted_model, "get_lookback"):
lookback = fitted_model.get_lookback()
df_all = do_rolling_forecast_with_lookback(
fitted_model,
@@ -302,26 +345,28 @@ if hasattr(fitted_model, 'get_lookback'):
max_horizon,
X_lookback_df.to_pandas_dataframe()[-lookback:],
y_lookback_df.to_pandas_dataframe().values.T[0][-lookback:],
freq)
freq,
)
else:
df_all = do_rolling_forecast(
fitted_model,
X_test_df.to_pandas_dataframe(),
y_test_df.to_pandas_dataframe().values.T[0],
max_horizon,
freq)
freq,
)
print(df_all)
print("target values:::")
print(df_all[target_column_name])
print("predicted values:::")
print(df_all['predicted'])
print(df_all["predicted"])
# Use the AutoML scoring module
regression_metrics = list(constants.REGRESSION_SCALAR_SET)
y_test = np.array(df_all[target_column_name])
y_pred = np.array(df_all['predicted'])
y_pred = np.array(df_all["predicted"])
scores = scoring.score_regression(y_test, y_pred, regression_metrics)
print("scores:")
@@ -331,12 +376,11 @@ for key, value in scores.items():
run.log(key, value)
print("Simple forecasting model")
rmse = np.sqrt(mean_squared_error(
df_all[target_column_name], df_all['predicted']))
rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all["predicted"]))
print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
mae = mean_absolute_error(df_all[target_column_name], df_all['predicted'])
print('mean_absolute_error score: %.2f' % mae)
print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted']))
mae = mean_absolute_error(df_all[target_column_name], df_all["predicted"])
print("mean_absolute_error score: %.2f" % mae)
print("MAPE: %.2f" % MAPE(df_all[target_column_name], df_all["predicted"]))
run.log('rmse', rmse)
run.log('mae', mae)
run.log("rmse", rmse)
run.log("mae", mae)

View File

@@ -4,11 +4,14 @@ from sklearn.externals import joblib
parser = argparse.ArgumentParser()
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
'--test_dataset', type=str, dest='test_dataset',
help='Test Dataset')
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)
args = parser.parse_args()
target_column_name = args.target_column_name
@@ -20,19 +23,30 @@ ws = run.experiment.workspace
# get the input dataset by id
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
X_test_df = test_dataset.drop_columns(columns=[target_column_name]).to_pandas_dataframe().reset_index(drop=True)
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(columns=[target_column_name]).to_pandas_dataframe()
X_test_df = (
test_dataset.drop_columns(columns=[target_column_name])
.to_pandas_dataframe()
.reset_index(drop=True)
)
y_test_df = (
test_dataset.with_timestamp_columns(None)
.keep_columns(columns=[target_column_name])
.to_pandas_dataframe()
)
fitted_model = joblib.load('model.pkl')
fitted_model = joblib.load("model.pkl")
y_pred, X_trans = fitted_model.rolling_evaluation(X_test_df, y_test_df.values)
# Add predictions, actuals, and horizon relative to rolling origin to the test feature data
assign_dict = {'horizon_origin': X_trans['horizon_origin'].values, 'predicted': y_pred,
target_column_name: y_test_df[target_column_name].values}
assign_dict = {
"horizon_origin": X_trans["horizon_origin"].values,
"predicted": y_pred,
target_column_name: y_test_df[target_column_name].values,
}
df_all = X_test_df.assign(**assign_dict)
file_name = 'outputs/predictions.csv'
file_name = "outputs/predictions.csv"
export_csv = df_all.to_csv(file_name, header=True)
# Upload the predictions into artifacts

View File

@@ -1,32 +1,40 @@
from azureml.core import ScriptRunConfig
def run_rolling_forecast(test_experiment, compute_target, train_run,
test_dataset, target_column_name,
inference_folder='./forecast'):
train_run.download_file('outputs/model.pkl',
inference_folder + '/model.pkl')
def run_rolling_forecast(
test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
train_run.download_file("outputs/model.pkl", inference_folder + "/model.pkl")
inference_env = train_run.get_environment()
config = ScriptRunConfig(source_directory=inference_folder,
script='forecasting_script.py',
arguments=['--target_column_name',
target_column_name,
'--test_dataset',
test_dataset.as_named_input(test_dataset.name)],
compute_target=compute_target,
environment=inference_env)
config = ScriptRunConfig(
source_directory=inference_folder,
script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name,
"--test_dataset",
test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target,
environment=inference_env,
)
run = test_experiment.submit(config,
tags={'training_run_id':
train_run.id,
'run_algorithm':
train_run.properties['run_algorithm'],
'valid_score':
train_run.properties['score'],
'primary_metric':
train_run.properties['primary_metric']})
run = test_experiment.submit(
config,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags['run_algorithm'])
run.log("run_algorithm", run.tags["run_algorithm"])
return run

View File

@@ -5,62 +5,20 @@ compute instance.
"""
import argparse
import pandas as pd
import numpy as np
from azureml.core import Dataset, Run
from azureml.automl.core.shared.constants import TimeSeriesInternal
from sklearn.externals import joblib
from pandas.tseries.frequencies import to_offset
def align_outputs(y_predicted, X_trans, X_test, y_test, target_column_name,
predicted_column_name='predicted',
horizon_colname='horizon_origin'):
"""
Demonstrates how to get the output aligned to the inputs
using pandas indexes. Helps understand what happened if
the output's shape differs from the input shape, or if
the data got re-sorted by time and grain during forecasting.
Typical causes of misalignment are:
* we predicted some periods that were missing in actuals -> drop from eval
* model was asked to predict past max_horizon -> increase max horizon
* data at start of X_test was needed for lags -> provide previous periods
"""
if (horizon_colname in X_trans):
df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname]})
else:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
# y and X outputs are aligned by forecast() function contract
df_fcst.index = X_trans.index
# align original X_test to y_test
X_test_full = X_test.copy()
X_test_full[target_column_name] = y_test
# X_test_full's index does not include origin, so reset for merge
df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns='index')
together = df_fcst.merge(X_test_full, how='right')
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = together[together[[target_column_name,
predicted_column_name]].notnull().all(axis=1)]
return(clean)
parser = argparse.ArgumentParser()
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
'--test_dataset', type=str, dest='test_dataset',
help='Test Dataset')
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)
args = parser.parse_args()
target_column_name = args.target_column_name
@@ -76,14 +34,28 @@ X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
y_test = X_test.pop(target_column_name).values
# generate forecast
fitted_model = joblib.load('model.pkl')
y_predictions, X_trans = fitted_model.forecast(X_test)
fitted_model = joblib.load("model.pkl")
# The default quantile values below give a 95% prediction interval
quantiles = [0.025, 0.5, 0.975]
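# Illustrative note: the median (0.5) is used below as the point forecast,
# while 0.025 and 0.975 become the prediction-interval bounds.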
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
# align output
df_all = align_outputs(y_predictions, X_trans, X_test, y_test, target_column_name)
file_name = 'outputs/predictions.csv'
export_csv = df_all.to_csv(file_name, header=True, index=False) # added Index
file_name = "outputs/predictions.csv"
export_csv = clean.to_csv(file_name, header=True, index=False) # added Index
# Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name)

View File

@@ -3,36 +3,47 @@ import shutil
from azureml.core import ScriptRunConfig
def run_remote_inference(test_experiment, compute_target, train_run,
test_dataset, target_column_name, inference_folder='./forecast'):
def run_remote_inference(
test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
# Create local directory to copy the model.pkl and forecasting_script.py files into.
# These files will be uploaded to and executed on the compute instance.
os.makedirs(inference_folder, exist_ok=True)
shutil.copy('forecasting_script.py', inference_folder)
shutil.copy("forecasting_script.py", inference_folder)
train_run.download_file('outputs/model.pkl',
os.path.join(inference_folder, 'model.pkl'))
train_run.download_file(
"outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
)
inference_env = train_run.get_environment()
config = ScriptRunConfig(source_directory=inference_folder,
script='forecasting_script.py',
arguments=['--target_column_name',
target_column_name,
'--test_dataset',
test_dataset.as_named_input(test_dataset.name)],
compute_target=compute_target,
environment=inference_env)
config = ScriptRunConfig(
source_directory=inference_folder,
script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name,
"--test_dataset",
test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target,
environment=inference_env,
)
run = test_experiment.submit(config,
tags={'training_run_id':
train_run.id,
'run_algorithm':
train_run.properties['run_algorithm'],
'valid_score':
train_run.properties['score'],
'primary_metric':
train_run.properties['primary_metric']})
run = test_experiment.submit(
config,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags['run_algorithm'])
run.log("run_algorithm", run.tags["run_algorithm"])
return run

View File

@@ -0,0 +1,725 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.png)"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"# Automated Machine Learning\n",
"**Github DAU Forecasting**\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Data](#Data)\n",
"1. [Train](#Train)\n",
"1. [Evaluate](#Evaluate)"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Introduction\n",
"This notebook demonstrates demand forecasting for Github Daily Active Users Dataset using AutoML.\n",
"\n",
"AutoML highlights here include using Deep Learning forecasts, Arima, Prophet, Remote Execution and Remote Inferencing, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"Notebook synopsis:\n",
"\n",
"1. Creating an Experiment in an existing Workspace\n",
"2. Configuration and remote run of AutoML for a time-series model exploring Regression learners, Arima, Prophet and DNNs\n",
"4. Evaluating the fitted model using a rolling test "
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Setup\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"import os\n",
"import azureml.core\n",
"import pandas as pd\n",
"import numpy as np\n",
"import logging\n",
"import warnings\n",
"\n",
"from pandas.tseries.frequencies import to_offset\n",
"\n",
"# Squash warning messages for cleaner output in the notebook\n",
"warnings.showwarning = lambda *args, **kwargs: None\n",
"\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.train.automl import AutoMLConfig\n",
"from matplotlib import pyplot as plt\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
"from azureml.train.estimator import Estimator"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"As part of the setup you have already created a <b>Workspace</b>. To run AutoML, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = \"github-remote-cpu\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"### Using AmlCompute\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"cpu_cluster_name = \"github-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Data\n",
"Read Github DAU data from file, and preview data."
]
},
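{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick, illustrative preview of the raw training file before any processing:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Peek at the first few rows of the training data (illustrative preview).\n",
"pd.read_csv(\"github_dau_2011-2018_train.csv\", parse_dates=[\"date\"]).head()"
]
},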
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"Let's set up what we know about the dataset. \n",
"\n",
"**Target column** is what we want to forecast.\n",
"\n",
"**Time column** is the time axis along which to predict.\n",
"\n",
"**Time series identifier columns** are identified by values of the columns listed `time_series_id_column_names`, for example \"store\" and \"item\" if your data has multiple time series of sales, one series for each combination of store and item sold.\n",
"\n",
"**Forecast frequency (freq)** This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.\n",
"\n",
"This dataset has only one time series. Please see the [orange juice notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales) for an example of a multi-time series dataset."
]
},
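{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a small illustrative check, a valid pandas offset alias parses cleanly with `to_offset` (already imported above); an invalid alias would raise an error:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sanity-check frequency aliases with pandas (illustrative).\n",
"print(to_offset(\"D\"))  # <Day> - daily\n",
"print(to_offset(\"MS\"))  # <MonthBegin> - monthly, anchored at month start"
]
},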
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"import pandas as pd\n",
"from pandas import DataFrame\n",
"from pandas import Grouper\n",
"from pandas import concat\n",
"from pandas.plotting import register_matplotlib_converters\n",
"\n",
"register_matplotlib_converters()\n",
"plt.figure(figsize=(20, 10))\n",
"plt.tight_layout()\n",
"\n",
"plt.subplot(2, 1, 1)\n",
"plt.title(\"Github Daily Active User By Year\")\n",
"df = pd.read_csv(\"github_dau_2011-2018_train.csv\", parse_dates=True, index_col=\"date\")\n",
"test_df = pd.read_csv(\n",
" \"github_dau_2011-2018_test.csv\", parse_dates=True, index_col=\"date\"\n",
")\n",
"plt.plot(df)\n",
"\n",
"plt.subplot(2, 1, 2)\n",
"plt.title(\"Github Daily Active User By Month\")\n",
"groups = df.groupby(df.index.month)\n",
"months = concat([DataFrame(x[1].values) for x in groups], axis=1)\n",
"months = DataFrame(months)\n",
"months.columns = range(1, 49)\n",
"months.boxplot()\n",
"\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"target_column_name = \"count\"\n",
"time_column_name = \"date\"\n",
"time_series_id_column_names = []\n",
"freq = \"D\" # Daily data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Split Training data into Train and Validation set and Upload to Datastores"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from helper import split_fraction_by_grain\n",
"from helper import split_full_for_forecasting\n",
"\n",
"train, valid = split_full_for_forecasting(df, time_column_name)\n",
"train.to_csv(\"train.csv\")\n",
"valid.to_csv(\"valid.csv\")\n",
"test_df.to_csv(\"test.csv\")\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"datastore.upload_files(\n",
" files=[\"./train.csv\"],\n",
" target_path=\"github-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./valid.csv\"],\n",
" target_path=\"github-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./test.csv\"],\n",
" target_path=\"github-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"\n",
"from azureml.core import Dataset\n",
"\n",
"train_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/train.csv\")]\n",
")\n",
"valid_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/valid.csv\")]\n",
")\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/test.csv\")]\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"### Setting forecaster maximum horizon \n",
"\n",
"The forecast horizon is the number of periods into the future that the model should predict. Here, we set the horizon to 12 periods (i.e. 12 months). Notice that this is much shorter than the number of months in the test set; we will need to use a rolling test to evaluate the performance on the whole test set. For more discussion of forecast horizons and guiding principles for setting them, please see the [energy demand notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand). "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"forecast_horizon = 12"
]
},
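{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a rough illustration (not part of the official workflow), the arithmetic below estimates how many rolling forecast windows the test set will require, assuming the origin advances by one horizon per iteration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import math\n",
"\n",
"# Each rolling iteration forecasts the next `forecast_horizon` periods and then\n",
"# advances the origin by the horizon, so the test set is covered in roughly:\n",
"n_windows = math.ceil(len(test_df) / forecast_horizon)\n",
"print(f\"~{n_windows} rolling windows over {len(test_df)} test rows\")"
]
},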
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Train\n",
"\n",
"Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|forecasting|\n",
"|**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>\n",
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
"|**training_data**|Input dataset, containing both features and label column.|\n",
"|**label_column_name**|The name of the label column.|\n",
"|**enable_dnn**|Enable Forecasting DNNs|\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n",
" freq=\"D\", # Set the forecast frequency to be daily\n",
")\n",
"\n",
"# We will disable the enable_early_stopping flag to ensure the DNN model is recommended for demonstration purpose.\n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" experiment_timeout_hours=1,\n",
" training_data=train_dataset,\n",
" label_column_name=target_column_name,\n",
" validation_data=valid_dataset,\n",
" verbosity=logging.INFO,\n",
" compute_target=compute_target,\n",
" max_concurrent_iterations=4,\n",
" max_cores_per_iteration=-1,\n",
" enable_dnn=True,\n",
" enable_early_stopping=False,\n",
" forecasting_parameters=forecasting_parameters,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"We will now run the experiment, starting with 10 iterations of model search. The experiment can be continued for more iterations if more accurate results are required. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"remote_run = experiment.submit(automl_config, show_output=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"# If you need to retrieve a run that already started, use the following code\n",
"# from azureml.train.automl.run import AutoMLRun\n",
"# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"Displaying the run objects gives you links to the visual tools in the Azure Portal. Go try them!"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"### Retrieve the Best Model for Each Algorithm\n",
"Below we select the best pipeline from our iterations. The get_output method on automl_classifier returns the best run and the fitted model for the last fit invocation. There are overloads on get_output that allow you to retrieve the best run and fitted model for any logged metric or a particular iteration."
]
},
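{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustration only: get_output() on the AutoML run returns the overall best\n",
"# run and fitted model; overloads retrieve a specific iteration or metric.\n",
"# best_run, fitted_model = remote_run.get_output()\n",
"# best_run_iter4, fitted_model_iter4 = remote_run.get_output(iteration=4)"
]
},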
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from helper import get_result_df\n",
"\n",
"summary_df = get_result_df(remote_run)\n",
"summary_df"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from azureml.core.run import Run\n",
"from azureml.widgets import RunDetails\n",
"\n",
"forecast_model = \"TCNForecaster\"\n",
"if not forecast_model in summary_df[\"run_id\"]:\n",
" forecast_model = \"ForecastTCN\"\n",
"\n",
"best_dnn_run_id = summary_df[\"run_id\"][forecast_model]\n",
"best_dnn_run = Run(experiment, best_dnn_run_id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"best_dnn_run.parent\n",
"RunDetails(best_dnn_run.parent).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"best_dnn_run\n",
"RunDetails(best_dnn_run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Evaluate on Test Data"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"We now use the best fitted model from the AutoML Run to make forecasts for the test set. \n",
"\n",
"We always score on the original dataset whose schema matches the training set schema."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/test.csv\")]\n",
")\n",
"# preview the first 3 rows of the dataset\n",
"test_dataset.take(5).to_pandas_dataframe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"compute_target = ws.compute_targets[\"github-cluster\"]\n",
"test_experiment = Experiment(ws, experiment_name + \"_test\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"import os\n",
"import shutil\n",
"\n",
"script_folder = os.path.join(os.getcwd(), \"inference\")\n",
"os.makedirs(script_folder, exist_ok=True)\n",
"shutil.copy(\"infer.py\", script_folder)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from helper import run_inference\n",
"\n",
"test_run = run_inference(\n",
" test_experiment,\n",
" compute_target,\n",
" script_folder,\n",
" best_dnn_run,\n",
" test_dataset,\n",
" valid_dataset,\n",
" forecast_horizon,\n",
" target_column_name,\n",
" time_column_name,\n",
" freq,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"RunDetails(test_run).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from helper import run_multiple_inferences\n",
"\n",
"summary_df = run_multiple_inferences(\n",
" summary_df,\n",
" experiment,\n",
" test_experiment,\n",
" compute_target,\n",
" script_folder,\n",
" test_dataset,\n",
" valid_dataset,\n",
" forecast_horizon,\n",
" target_column_name,\n",
" time_column_name,\n",
" freq,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"for run_name, run_summary in summary_df.iterrows():\n",
" print(run_name)\n",
" print(run_summary)\n",
" run_id = run_summary.run_id\n",
" test_run_id = run_summary.test_run_id\n",
" test_run = Run(test_experiment, test_run_id)\n",
" test_run.wait_for_completion()\n",
" test_score = test_run.get_metrics()[run_summary.primary_metric]\n",
" summary_df.loc[summary_df.run_id == run_id, \"Test Score\"] = test_score\n",
" print(\"Test Score: \", test_score)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"summary_df"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"hide_code_all_hidden": false,
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1,455 @@
date,count,day_of_week,month_of_year,holiday
2017-06-04,104663,6.0,5.0,0.0
2017-06-05,155824,0.0,5.0,0.0
2017-06-06,164908,1.0,5.0,0.0
2017-06-07,170309,2.0,5.0,0.0
2017-06-08,164256,3.0,5.0,0.0
2017-06-09,153406,4.0,5.0,0.0
2017-06-10,97024,5.0,5.0,0.0
2017-06-11,103442,6.0,5.0,0.0
2017-06-12,160768,0.0,5.0,0.0
2017-06-13,166288,1.0,5.0,0.0
2017-06-14,163819,2.0,5.0,0.0
2017-06-15,157593,3.0,5.0,0.0
2017-06-16,149259,4.0,5.0,0.0
2017-06-17,95579,5.0,5.0,0.0
2017-06-18,98723,6.0,5.0,0.0
2017-06-19,159076,0.0,5.0,0.0
2017-06-20,163340,1.0,5.0,0.0
2017-06-21,163344,2.0,5.0,0.0
2017-06-22,159528,3.0,5.0,0.0
2017-06-23,146563,4.0,5.0,0.0
2017-06-24,92631,5.0,5.0,0.0
2017-06-25,96549,6.0,5.0,0.0
2017-06-26,153249,0.0,5.0,0.0
2017-06-27,160357,1.0,5.0,0.0
2017-06-28,159941,2.0,5.0,0.0
2017-06-29,156781,3.0,5.0,0.0
2017-06-30,144709,4.0,5.0,0.0
2017-07-01,89101,5.0,6.0,0.0
2017-07-02,93046,6.0,6.0,0.0
2017-07-03,144113,0.0,6.0,0.0
2017-07-04,143061,1.0,6.0,1.0
2017-07-05,154603,2.0,6.0,0.0
2017-07-06,157200,3.0,6.0,0.0
2017-07-07,147213,4.0,6.0,0.0
2017-07-08,92348,5.0,6.0,0.0
2017-07-09,97018,6.0,6.0,0.0
2017-07-10,157192,0.0,6.0,0.0
2017-07-11,161819,1.0,6.0,0.0
2017-07-12,161998,2.0,6.0,0.0
2017-07-13,160280,3.0,6.0,0.0
2017-07-14,146818,4.0,6.0,0.0
2017-07-15,93041,5.0,6.0,0.0
2017-07-16,97505,6.0,6.0,0.0
2017-07-17,156167,0.0,6.0,0.0
2017-07-18,162855,1.0,6.0,0.0
2017-07-19,162519,2.0,6.0,0.0
2017-07-20,159941,3.0,6.0,0.0
2017-07-21,148460,4.0,6.0,0.0
2017-07-22,93431,5.0,6.0,0.0
2017-07-23,98553,6.0,6.0,0.0
2017-07-24,156202,0.0,6.0,0.0
2017-07-25,162503,1.0,6.0,0.0
2017-07-26,158479,2.0,6.0,0.0
2017-07-27,158192,3.0,6.0,0.0
2017-07-28,147108,4.0,6.0,0.0
2017-07-29,93799,5.0,6.0,0.0
2017-07-30,97920,6.0,6.0,0.0
2017-07-31,152197,0.0,6.0,0.0
2017-08-01,158477,1.0,7.0,0.0
2017-08-02,159089,2.0,7.0,0.0
2017-08-03,157182,3.0,7.0,0.0
2017-08-04,146345,4.0,7.0,0.0
2017-08-05,92534,5.0,7.0,0.0
2017-08-06,97128,6.0,7.0,0.0
2017-08-07,151359,0.0,7.0,0.0
2017-08-08,159895,1.0,7.0,0.0
2017-08-09,158329,2.0,7.0,0.0
2017-08-10,155468,3.0,7.0,0.0
2017-08-11,144914,4.0,7.0,0.0
2017-08-12,92258,5.0,7.0,0.0
2017-08-13,95933,6.0,7.0,0.0
2017-08-14,147706,0.0,7.0,0.0
2017-08-15,151115,1.0,7.0,0.0
2017-08-16,157640,2.0,7.0,0.0
2017-08-17,156600,3.0,7.0,0.0
2017-08-18,146980,4.0,7.0,0.0
2017-08-19,94592,5.0,7.0,0.0
2017-08-20,99320,6.0,7.0,0.0
2017-08-21,145727,0.0,7.0,0.0
2017-08-22,160260,1.0,7.0,0.0
2017-08-23,160440,2.0,7.0,0.0
2017-08-24,157830,3.0,7.0,0.0
2017-08-25,145822,4.0,7.0,0.0
2017-08-26,94706,5.0,7.0,0.0
2017-08-27,99047,6.0,7.0,0.0
2017-08-28,152112,0.0,7.0,0.0
2017-08-29,162440,1.0,7.0,0.0
2017-08-30,162902,2.0,7.0,0.0
2017-08-31,159498,3.0,7.0,0.0
2017-09-01,145689,4.0,8.0,0.0
2017-09-02,93589,5.0,8.0,0.0
2017-09-03,100058,6.0,8.0,0.0
2017-09-04,140865,0.0,8.0,1.0
2017-09-05,165715,1.0,8.0,0.0
2017-09-06,167463,2.0,8.0,0.0
2017-09-07,164811,3.0,8.0,0.0
2017-09-08,156157,4.0,8.0,0.0
2017-09-09,101358,5.0,8.0,0.0
2017-09-10,107915,6.0,8.0,0.0
2017-09-11,167845,0.0,8.0,0.0
2017-09-12,172756,1.0,8.0,0.0
2017-09-13,172851,2.0,8.0,0.0
2017-09-14,171675,3.0,8.0,0.0
2017-09-15,159266,4.0,8.0,0.0
2017-09-16,103547,5.0,8.0,0.0
2017-09-17,110964,6.0,8.0,0.0
2017-09-18,170976,0.0,8.0,0.0
2017-09-19,177864,1.0,8.0,0.0
2017-09-20,173567,2.0,8.0,0.0
2017-09-21,172017,3.0,8.0,0.0
2017-09-22,161357,4.0,8.0,0.0
2017-09-23,104681,5.0,8.0,0.0
2017-09-24,111711,6.0,8.0,0.0
2017-09-25,173517,0.0,8.0,0.0
2017-09-26,180049,1.0,8.0,0.0
2017-09-27,178307,2.0,8.0,0.0
2017-09-28,174157,3.0,8.0,0.0
2017-09-29,161707,4.0,8.0,0.0
2017-09-30,110536,5.0,8.0,0.0
2017-10-01,106505,6.0,9.0,0.0
2017-10-02,157565,0.0,9.0,0.0
2017-10-03,164764,1.0,9.0,0.0
2017-10-04,163383,2.0,9.0,0.0
2017-10-05,162847,3.0,9.0,0.0
2017-10-06,153575,4.0,9.0,0.0
2017-10-07,107472,5.0,9.0,0.0
2017-10-08,116127,6.0,9.0,0.0
2017-10-09,174457,0.0,9.0,1.0
2017-10-10,185217,1.0,9.0,0.0
2017-10-11,185120,2.0,9.0,0.0
2017-10-12,180844,3.0,9.0,0.0
2017-10-13,170178,4.0,9.0,0.0
2017-10-14,112754,5.0,9.0,0.0
2017-10-15,121251,6.0,9.0,0.0
2017-10-16,183906,0.0,9.0,0.0
2017-10-17,188945,1.0,9.0,0.0
2017-10-18,187297,2.0,9.0,0.0
2017-10-19,183867,3.0,9.0,0.0
2017-10-20,173021,4.0,9.0,0.0
2017-10-21,115851,5.0,9.0,0.0
2017-10-22,126088,6.0,9.0,0.0
2017-10-23,189452,0.0,9.0,0.0
2017-10-24,194412,1.0,9.0,0.0
2017-10-25,192293,2.0,9.0,0.0
2017-10-26,190163,3.0,9.0,0.0
2017-10-27,177053,4.0,9.0,0.0
2017-10-28,114934,5.0,9.0,0.0
2017-10-29,125289,6.0,9.0,0.0
2017-10-30,189245,0.0,9.0,0.0
2017-10-31,191480,1.0,9.0,0.0
2017-11-01,182281,2.0,10.0,0.0
2017-11-02,186351,3.0,10.0,0.0
2017-11-03,175422,4.0,10.0,0.0
2017-11-04,118160,5.0,10.0,0.0
2017-11-05,127602,6.0,10.0,0.0
2017-11-06,191067,0.0,10.0,0.0
2017-11-07,197083,1.0,10.0,0.0
2017-11-08,194333,2.0,10.0,0.0
2017-11-09,193914,3.0,10.0,0.0
2017-11-10,179933,4.0,10.0,1.0
2017-11-11,121346,5.0,10.0,0.0
2017-11-12,131900,6.0,10.0,0.0
2017-11-13,196969,0.0,10.0,0.0
2017-11-14,201949,1.0,10.0,0.0
2017-11-15,198424,2.0,10.0,0.0
2017-11-16,196902,3.0,10.0,0.0
2017-11-17,183893,4.0,10.0,0.0
2017-11-18,122767,5.0,10.0,0.0
2017-11-19,130890,6.0,10.0,0.0
2017-11-20,194515,0.0,10.0,0.0
2017-11-21,198601,1.0,10.0,0.0
2017-11-22,191041,2.0,10.0,0.0
2017-11-23,170321,3.0,10.0,1.0
2017-11-24,155623,4.0,10.0,0.0
2017-11-25,115759,5.0,10.0,0.0
2017-11-26,128771,6.0,10.0,0.0
2017-11-27,199419,0.0,10.0,0.0
2017-11-28,207253,1.0,10.0,0.0
2017-11-29,205406,2.0,10.0,0.0
2017-11-30,200674,3.0,10.0,0.0
2017-12-01,187017,4.0,11.0,0.0
2017-12-02,129735,5.0,11.0,0.0
2017-12-03,139120,6.0,11.0,0.0
2017-12-04,205505,0.0,11.0,0.0
2017-12-05,208218,1.0,11.0,0.0
2017-12-06,202480,2.0,11.0,0.0
2017-12-07,197822,3.0,11.0,0.0
2017-12-08,180686,4.0,11.0,0.0
2017-12-09,123667,5.0,11.0,0.0
2017-12-10,130987,6.0,11.0,0.0
2017-12-11,193901,0.0,11.0,0.0
2017-12-12,194997,1.0,11.0,0.0
2017-12-13,192063,2.0,11.0,0.0
2017-12-14,186496,3.0,11.0,0.0
2017-12-15,170812,4.0,11.0,0.0
2017-12-16,110474,5.0,11.0,0.0
2017-12-17,118165,6.0,11.0,0.0
2017-12-18,176843,0.0,11.0,0.0
2017-12-19,179550,1.0,11.0,0.0
2017-12-20,173506,2.0,11.0,0.0
2017-12-21,165910,3.0,11.0,0.0
2017-12-22,145886,4.0,11.0,0.0
2017-12-23,95246,5.0,11.0,0.0
2017-12-24,88781,6.0,11.0,0.0
2017-12-25,98189,0.0,11.0,1.0
2017-12-26,121383,1.0,11.0,0.0
2017-12-27,135300,2.0,11.0,0.0
2017-12-28,136827,3.0,11.0,0.0
2017-12-29,127700,4.0,11.0,0.0
2017-12-30,93014,5.0,11.0,0.0
2017-12-31,82878,6.0,11.0,0.0
2018-01-01,86419,0.0,0.0,1.0
2018-01-02,147428,1.0,0.0,0.0
2018-01-03,162193,2.0,0.0,0.0
2018-01-04,163784,3.0,0.0,0.0
2018-01-05,158606,4.0,0.0,0.0
2018-01-06,113467,5.0,0.0,0.0
2018-01-07,118313,6.0,0.0,0.0
2018-01-08,175623,0.0,0.0,0.0
2018-01-09,183880,1.0,0.0,0.0
2018-01-10,183945,2.0,0.0,0.0
2018-01-11,181769,3.0,0.0,0.0
2018-01-12,170552,4.0,0.0,0.0
2018-01-13,115707,5.0,0.0,0.0
2018-01-14,121191,6.0,0.0,0.0
2018-01-15,176127,0.0,0.0,1.0
2018-01-16,188032,1.0,0.0,0.0
2018-01-17,189871,2.0,0.0,0.0
2018-01-18,189348,3.0,0.0,0.0
2018-01-19,177456,4.0,0.0,0.0
2018-01-20,123321,5.0,0.0,0.0
2018-01-21,128306,6.0,0.0,0.0
2018-01-22,186132,0.0,0.0,0.0
2018-01-23,197618,1.0,0.0,0.0
2018-01-24,196402,2.0,0.0,0.0
2018-01-25,192722,3.0,0.0,0.0
2018-01-26,179415,4.0,0.0,0.0
2018-01-27,125769,5.0,0.0,0.0
2018-01-28,133306,6.0,0.0,0.0
2018-01-29,194151,0.0,0.0,0.0
2018-01-30,198680,1.0,0.0,0.0
2018-01-31,198652,2.0,0.0,0.0
2018-02-01,195472,3.0,1.0,0.0
2018-02-02,183173,4.0,1.0,0.0
2018-02-03,124276,5.0,1.0,0.0
2018-02-04,129054,6.0,1.0,0.0
2018-02-05,190024,0.0,1.0,0.0
2018-02-06,198658,1.0,1.0,0.0
2018-02-07,198272,2.0,1.0,0.0
2018-02-08,195339,3.0,1.0,0.0
2018-02-09,183086,4.0,1.0,0.0
2018-02-10,122536,5.0,1.0,0.0
2018-02-11,133033,6.0,1.0,0.0
2018-02-12,185386,0.0,1.0,0.0
2018-02-13,184789,1.0,1.0,0.0
2018-02-14,176089,2.0,1.0,0.0
2018-02-15,171317,3.0,1.0,0.0
2018-02-16,162693,4.0,1.0,0.0
2018-02-17,116342,5.0,1.0,0.0
2018-02-18,122466,6.0,1.0,0.0
2018-02-19,172364,0.0,1.0,1.0
2018-02-20,185896,1.0,1.0,0.0
2018-02-21,188166,2.0,1.0,0.0
2018-02-22,189427,3.0,1.0,0.0
2018-02-23,178732,4.0,1.0,0.0
2018-02-24,132664,5.0,1.0,0.0
2018-02-25,134008,6.0,1.0,0.0
2018-02-26,200075,0.0,1.0,0.0
2018-02-27,207996,1.0,1.0,0.0
2018-02-28,204416,2.0,1.0,0.0
2018-03-01,201320,3.0,2.0,0.0
2018-03-02,188205,4.0,2.0,0.0
2018-03-03,131162,5.0,2.0,0.0
2018-03-04,138320,6.0,2.0,0.0
2018-03-05,207326,0.0,2.0,0.0
2018-03-06,212462,1.0,2.0,0.0
2018-03-07,209357,2.0,2.0,0.0
2018-03-08,194876,3.0,2.0,0.0
2018-03-09,193761,4.0,2.0,0.0
2018-03-10,133449,5.0,2.0,0.0
2018-03-11,142258,6.0,2.0,0.0
2018-03-12,208753,0.0,2.0,0.0
2018-03-13,210602,1.0,2.0,0.0
2018-03-14,214236,2.0,2.0,0.0
2018-03-15,210761,3.0,2.0,0.0
2018-03-16,196619,4.0,2.0,0.0
2018-03-17,133056,5.0,2.0,0.0
2018-03-18,141335,6.0,2.0,0.0
2018-03-19,211580,0.0,2.0,0.0
2018-03-20,219051,1.0,2.0,0.0
2018-03-21,215435,2.0,2.0,0.0
2018-03-22,211961,3.0,2.0,0.0
2018-03-23,196009,4.0,2.0,0.0
2018-03-24,132390,5.0,2.0,0.0
2018-03-25,140021,6.0,2.0,0.0
2018-03-26,205273,0.0,2.0,0.0
2018-03-27,212686,1.0,2.0,0.0
2018-03-28,210683,2.0,2.0,0.0
2018-03-29,189044,3.0,2.0,0.0
2018-03-30,170256,4.0,2.0,0.0
2018-03-31,125999,5.0,2.0,0.0
2018-04-01,126749,6.0,3.0,0.0
2018-04-02,186546,0.0,3.0,0.0
2018-04-03,207905,1.0,3.0,0.0
2018-04-04,201528,2.0,3.0,0.0
2018-04-05,188580,3.0,3.0,0.0
2018-04-06,173714,4.0,3.0,0.0
2018-04-07,125723,5.0,3.0,0.0
2018-04-08,142545,6.0,3.0,0.0
2018-04-09,204767,0.0,3.0,0.0
2018-04-10,212048,1.0,3.0,0.0
2018-04-11,210517,2.0,3.0,0.0
2018-04-12,206924,3.0,3.0,0.0
2018-04-13,191679,4.0,3.0,0.0
2018-04-14,126394,5.0,3.0,0.0
2018-04-15,137279,6.0,3.0,0.0
2018-04-16,208085,0.0,3.0,0.0
2018-04-17,213273,1.0,3.0,0.0
2018-04-18,211580,2.0,3.0,0.0
2018-04-19,206037,3.0,3.0,0.0
2018-04-20,191211,4.0,3.0,0.0
2018-04-21,125564,5.0,3.0,0.0
2018-04-22,136469,6.0,3.0,0.0
2018-04-23,206288,0.0,3.0,0.0
2018-04-24,212115,1.0,3.0,0.0
2018-04-25,207948,2.0,3.0,0.0
2018-04-26,205759,3.0,3.0,0.0
2018-04-27,181330,4.0,3.0,0.0
2018-04-28,130046,5.0,3.0,0.0
2018-04-29,120802,6.0,3.0,0.0
2018-04-30,170390,0.0,3.0,0.0
2018-05-01,169054,1.0,4.0,0.0
2018-05-02,197891,2.0,4.0,0.0
2018-05-03,199820,3.0,4.0,0.0
2018-05-04,186783,4.0,4.0,0.0
2018-05-05,124420,5.0,4.0,0.0
2018-05-06,130666,6.0,4.0,0.0
2018-05-07,196014,0.0,4.0,0.0
2018-05-08,203058,1.0,4.0,0.0
2018-05-09,198582,2.0,4.0,0.0
2018-05-10,191321,3.0,4.0,0.0
2018-05-11,183639,4.0,4.0,0.0
2018-05-12,122023,5.0,4.0,0.0
2018-05-13,128775,6.0,4.0,0.0
2018-05-14,199104,0.0,4.0,0.0
2018-05-15,200658,1.0,4.0,0.0
2018-05-16,201541,2.0,4.0,0.0
2018-05-17,196886,3.0,4.0,0.0
2018-05-18,188597,4.0,4.0,0.0
2018-05-19,121392,5.0,4.0,0.0
2018-05-20,126981,6.0,4.0,0.0
2018-05-21,189291,0.0,4.0,0.0
2018-05-22,203038,1.0,4.0,0.0
2018-05-23,205330,2.0,4.0,0.0
2018-05-24,199208,3.0,4.0,0.0
2018-05-25,187768,4.0,4.0,0.0
2018-05-26,117635,5.0,4.0,0.0
2018-05-27,124352,6.0,4.0,0.0
2018-05-28,180398,0.0,4.0,1.0
2018-05-29,194170,1.0,4.0,0.0
2018-05-30,200281,2.0,4.0,0.0
2018-05-31,197244,3.0,4.0,0.0
2018-06-01,184037,4.0,5.0,0.0
2018-06-02,121135,5.0,5.0,0.0
2018-06-03,129389,6.0,5.0,0.0
2018-06-04,200331,0.0,5.0,0.0
2018-06-05,207735,1.0,5.0,0.0
2018-06-06,203354,2.0,5.0,0.0
2018-06-07,200520,3.0,5.0,0.0
2018-06-08,182038,4.0,5.0,0.0
2018-06-09,120164,5.0,5.0,0.0
2018-06-10,125256,6.0,5.0,0.0
2018-06-11,194786,0.0,5.0,0.0
2018-06-12,200815,1.0,5.0,0.0
2018-06-13,197740,2.0,5.0,0.0
2018-06-14,192294,3.0,5.0,0.0
2018-06-15,173587,4.0,5.0,0.0
2018-06-16,105955,5.0,5.0,0.0
2018-06-17,110780,6.0,5.0,0.0
2018-06-18,174582,0.0,5.0,0.0
2018-06-19,193310,1.0,5.0,0.0
2018-06-20,193062,2.0,5.0,0.0
2018-06-21,187986,3.0,5.0,0.0
2018-06-22,173606,4.0,5.0,0.0
2018-06-23,111795,5.0,5.0,0.0
2018-06-24,116134,6.0,5.0,0.0
2018-06-25,185919,0.0,5.0,0.0
2018-06-26,193142,1.0,5.0,0.0
2018-06-27,188114,2.0,5.0,0.0
2018-06-28,183737,3.0,5.0,0.0
2018-06-29,171496,4.0,5.0,0.0
2018-06-30,107210,5.0,5.0,0.0
2018-07-01,111053,6.0,6.0,0.0
2018-07-02,176198,0.0,6.0,0.0
2018-07-03,184040,1.0,6.0,0.0
2018-07-04,169783,2.0,6.0,1.0
2018-07-05,177996,3.0,6.0,0.0
2018-07-06,167378,4.0,6.0,0.0
2018-07-07,106401,5.0,6.0,0.0
2018-07-08,112327,6.0,6.0,0.0
2018-07-09,182835,0.0,6.0,0.0
2018-07-10,187694,1.0,6.0,0.0
2018-07-11,185762,2.0,6.0,0.0
2018-07-12,184099,3.0,6.0,0.0
2018-07-13,170860,4.0,6.0,0.0
2018-07-14,106799,5.0,6.0,0.0
2018-07-15,108475,6.0,6.0,0.0
2018-07-16,175704,0.0,6.0,0.0
2018-07-17,183596,1.0,6.0,0.0
2018-07-18,179897,2.0,6.0,0.0
2018-07-19,183373,3.0,6.0,0.0
2018-07-20,169626,4.0,6.0,0.0
2018-07-21,106785,5.0,6.0,0.0
2018-07-22,112387,6.0,6.0,0.0
2018-07-23,180572,0.0,6.0,0.0
2018-07-24,186943,1.0,6.0,0.0
2018-07-25,185744,2.0,6.0,0.0
2018-07-26,183117,3.0,6.0,0.0
2018-07-27,168526,4.0,6.0,0.0
2018-07-28,105936,5.0,6.0,0.0
2018-07-29,111708,6.0,6.0,0.0
2018-07-30,179950,0.0,6.0,0.0
2018-07-31,185930,1.0,6.0,0.0
2018-08-01,183366,2.0,7.0,0.0
2018-08-02,182412,3.0,7.0,0.0
2018-08-03,173429,4.0,7.0,0.0
2018-08-04,106108,5.0,7.0,0.0
2018-08-05,110059,6.0,7.0,0.0
2018-08-06,178355,0.0,7.0,0.0
2018-08-07,185518,1.0,7.0,0.0
2018-08-08,183204,2.0,7.0,0.0
2018-08-09,181276,3.0,7.0,0.0
2018-08-10,168297,4.0,7.0,0.0
2018-08-11,106488,5.0,7.0,0.0
2018-08-12,111786,6.0,7.0,0.0
2018-08-13,178620,0.0,7.0,0.0
2018-08-14,181922,1.0,7.0,0.0
2018-08-15,172198,2.0,7.0,0.0
2018-08-16,177367,3.0,7.0,0.0
2018-08-17,166550,4.0,7.0,0.0
2018-08-18,107011,5.0,7.0,0.0
2018-08-19,112299,6.0,7.0,0.0
2018-08-20,176718,0.0,7.0,0.0
2018-08-21,182562,1.0,7.0,0.0
2018-08-22,181484,2.0,7.0,0.0
2018-08-23,180317,3.0,7.0,0.0
2018-08-24,170197,4.0,7.0,0.0
2018-08-25,109383,5.0,7.0,0.0
2018-08-26,113373,6.0,7.0,0.0
2018-08-27,180142,0.0,7.0,0.0
2018-08-28,191628,1.0,7.0,0.0
2018-08-29,191149,2.0,7.0,0.0
2018-08-30,187503,3.0,7.0,0.0
2018-08-31,172280,4.0,7.0,0.0

View File

@@ -0,0 +1,183 @@
import pandas as pd
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.train.estimator import Estimator
from azureml.core.run import Run
from azureml.automl.core.shared import constants


def split_fraction_by_grain(df, fraction, time_column_name, grain_column_names=None):
    """Group df by grain and split off the last fraction of rows for each group."""
    if not grain_column_names:
        df["tmp_grain_column"] = "grain"
        grain_column_names = ["tmp_grain_column"]
df_grouped = df.sort_values(time_column_name).groupby(
grain_column_names, group_keys=False
)
df_head = df_grouped.apply(
lambda dfg: dfg.iloc[: -int(len(dfg) * fraction)] if fraction > 0 else dfg
)
df_tail = df_grouped.apply(
lambda dfg: dfg.iloc[-int(len(dfg) * fraction) :] if fraction > 0 else dfg[:0]
)
if "tmp_grain_column" in grain_column_names:
for df2 in (df, df_head, df_tail):
df2.drop("tmp_grain_column", axis=1, inplace=True)
grain_column_names.remove("tmp_grain_column")
return df_head, df_tail


def split_full_for_forecasting(
df, time_column_name, grain_column_names=None, test_split=0.2
):
index_name = df.index.name
# Assumes that there isn't already a column called tmpindex
df["tmpindex"] = df.index
train_df, test_df = split_fraction_by_grain(
df, test_split, time_column_name, grain_column_names
)
train_df = train_df.set_index("tmpindex")
train_df.index.name = index_name
test_df = test_df.set_index("tmpindex")
test_df.index.name = index_name
df.drop("tmpindex", axis=1, inplace=True)
return train_df, test_df


def get_result_df(remote_run):
children = list(remote_run.get_children(recursive=True))
summary_df = pd.DataFrame(
index=["run_id", "run_algorithm", "primary_metric", "Score"]
)
goal_minimize = False
for run in children:
if (
run.get_status().lower() == constants.RunState.COMPLETE_RUN
and "run_algorithm" in run.properties
and "score" in run.properties
):
# We only count in the completed child runs.
summary_df[run.id] = [
run.id,
run.properties["run_algorithm"],
run.properties["primary_metric"],
float(run.properties["score"]),
]
if "goal" in run.properties:
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
summary_df = summary_df.T.sort_values(
"Score", ascending=goal_minimize
).drop_duplicates(["run_algorithm"])
summary_df = summary_df.set_index("run_algorithm")
return summary_df


def run_inference(
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
):
model_base_name = "model.pkl"
if "model_data_location" in train_run.properties:
model_location = train_run.properties["model_data_location"]
_, model_base_name = model_location.rsplit("/", 1)
train_run.download_file(
"outputs/{}".format(model_base_name), "inference/{}".format(model_base_name)
)
train_run.download_file("outputs/conda_env_v_1_0_0.yml", "inference/condafile.yml")
inference_env = Environment("myenv")
inference_env.docker.enabled = True
inference_env.python.conda_dependencies = CondaDependencies(
conda_dependencies_file_path="inference/condafile.yml"
)
est = Estimator(
source_directory=script_folder,
entry_script="infer.py",
script_params={
"--max_horizon": max_horizon,
"--target_column_name": target_column_name,
"--time_column_name": time_column_name,
"--frequency": freq,
"--model_path": model_base_name,
},
inputs=[
test_dataset.as_named_input("test_data"),
lookback_dataset.as_named_input("lookback_data"),
],
compute_target=compute_target,
environment_definition=inference_env,
)
run = test_experiment.submit(
est,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags["run_algorithm"])
return run


def run_multiple_inferences(
summary_df,
train_experiment,
test_experiment,
compute_target,
script_folder,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
):
for run_name, run_summary in summary_df.iterrows():
print(run_name)
print(run_summary)
run_id = run_summary.run_id
train_run = Run(train_experiment, run_id)
test_run = run_inference(
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
)
print(test_run)
summary_df.loc[summary_df.run_id == run_id, "test_run_id"] = test_run.id
return summary_df

View File

@@ -0,0 +1,386 @@
import argparse
import os
import numpy as np
import pandas as pd
from pandas.tseries.frequencies import to_offset
try:
    import joblib
except ImportError:
    # Older scikit-learn releases vendored joblib as sklearn.externals.joblib
    from sklearn.externals import joblib
from sklearn.metrics import mean_absolute_error, mean_squared_error
from azureml.automl.runtime.shared.score import scoring, constants
from azureml.core import Run
try:
import torch
_torch_present = True
except ImportError:
_torch_present = False


def align_outputs(
y_predicted,
X_trans,
X_test,
y_test,
predicted_column_name="predicted",
horizon_colname="horizon_origin",
):
"""
Demonstrates how to get the output aligned to the inputs
using pandas indexes. Helps understand what happened if
the output's shape differs from the input shape, or if
the data got re-sorted by time and grain during forecasting.
Typical causes of misalignment are:
* we predicted some periods that were missing in actuals -> drop from eval
* model was asked to predict past max_horizon -> increase max horizon
* data at start of X_test was needed for lags -> provide previous periods
"""
if horizon_colname in X_trans:
df_fcst = pd.DataFrame(
{
predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname],
}
)
else:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
# y and X outputs are aligned by forecast() function contract
df_fcst.index = X_trans.index
# align original X_test to y_test
X_test_full = X_test.copy()
X_test_full[target_column_name] = y_test
# X_test_full's index does not include origin, so reset for merge
df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns="index")
together = df_fcst.merge(X_test_full, how="right")
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = together[
together[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
return clean


def do_rolling_forecast_with_lookback(
fitted_model, X_test, y_test, max_horizon, X_lookback, y_lookback, freq="D"
):
"""
Produce forecasts on a rolling origin over the given test set.
Each iteration makes a forecast for the next 'max_horizon' periods
with respect to the current origin, then advances the origin by the
horizon time duration. The prediction context for each forecast is set so
that the forecaster uses the actual target values prior to the current
origin time for constructing lag features.
This function returns a concatenated DataFrame of rolling forecasts.
"""
print("Using lookback of size: ", y_lookback.size)
df_list = []
origin_time = X_test[time_column_name].min()
    X = pd.concat([X_lookback, X_test])
y = np.concatenate((y_lookback, y_test), axis=0)
while origin_time <= X_test[time_column_name].max():
# Set the horizon time - end date of the forecast
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up-to the horizon
expand_wind = X[time_column_name] < horizon_time
X_test_expand = X[expand_wind]
        y_query_expand = np.full(len(X_test_expand), np.nan, dtype=float)
if origin_time != X[time_column_name].min():
# Set the context by including actuals up-to the origin time
test_context_expand_wind = X[time_column_name] < origin_time
context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y[test_context_expand_wind]
# Print some debug info
print(
"Horizon_time:",
horizon_time,
" origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
print("X_test")
print(X)
print("X_test_expand")
print(X_test_expand)
print("Type of X_test_expand: ", type(X_test_expand))
print("Type of y_query_expand: ", type(y_query_expand))
print("y_query_expand")
print(y_query_expand)
# Make a forecast out to the maximum horizon
# y_fcst, X_trans = y_query_expand, X_test_expand
y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)
print("y_fcst")
print(y_fcst)
# Align forecast with test set for dates within
# the current rolling window
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X[test_roll_wind],
y[test_roll_wind],
)
)
# Advance the origin time
origin_time = horizon_time
return pd.concat(df_list, ignore_index=True)


def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq="D"):
"""
Produce forecasts on a rolling origin over the given test set.
Each iteration makes a forecast for the next 'max_horizon' periods
with respect to the current origin, then advances the origin by the
horizon time duration. The prediction context for each forecast is set so
that the forecaster uses the actual target values prior to the current
origin time for constructing lag features.
This function returns a concatenated DataFrame of rolling forecasts.
"""
df_list = []
origin_time = X_test[time_column_name].min()
while origin_time <= X_test[time_column_name].max():
# Set the horizon time - end date of the forecast
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up-to the horizon
expand_wind = X_test[time_column_name] < horizon_time
X_test_expand = X_test[expand_wind]
        y_query_expand = np.full(len(X_test_expand), np.nan, dtype=float)
if origin_time != X_test[time_column_name].min():
# Set the context by including actuals up-to the origin time
test_context_expand_wind = X_test[time_column_name] < origin_time
context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y_test[test_context_expand_wind]
# Print some debug info
print(
"Horizon_time:",
horizon_time,
" origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
print("X_test")
print(X_test)
print("X_test_expand")
print(X_test_expand)
print("Type of X_test_expand: ", type(X_test_expand))
print("Type of y_query_expand: ", type(y_query_expand))
print("y_query_expand")
print(y_query_expand)
# Make a forecast out to the maximum horizon
y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)
print("y_fcst")
print(y_fcst)
# Align forecast with test set for dates within the
# current rolling window
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X_test[time_column_name] >= origin_time)
df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X_test[test_roll_wind],
y_test[test_roll_wind],
)
)
# Advance the origin time
origin_time = horizon_time
return pd.concat(df_list, ignore_index=True)


def APE(actual, pred):
"""
Calculate absolute percentage error.
Returns a vector of APE values with same length as actual/pred.
"""
return 100 * np.abs((actual - pred) / actual)


def MAPE(actual, pred):
"""
Calculate mean absolute percentage error.
Remove NA and values where actual is close to zero
"""
not_na = ~(np.isnan(actual) | np.isnan(pred))
not_zero = ~np.isclose(actual, 0.0)
actual_safe = actual[not_na & not_zero]
pred_safe = pred[not_na & not_zero]
return np.mean(APE(actual_safe, pred_safe))


def map_location_cuda(storage, loc):
return storage.cuda()


parser = argparse.ArgumentParser()
parser.add_argument(
"--max_horizon",
type=int,
dest="max_horizon",
default=10,
help="Max Horizon for forecasting",
)
parser.add_argument(
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
"--time_column_name", type=str, dest="time_column_name", help="Time Column Name"
)
parser.add_argument(
"--frequency", type=str, dest="freq", help="Frequency of prediction"
)
parser.add_argument(
"--model_path",
type=str,
dest="model_path",
default="model.pkl",
help="Filename of model to be loaded",
)
args = parser.parse_args()
max_horizon = args.max_horizon
target_column_name = args.target_column_name
time_column_name = args.time_column_name
freq = args.freq
model_path = args.model_path
print("args passed are: ")
print(max_horizon)
print(target_column_name)
print(time_column_name)
print(freq)
print(model_path)
run = Run.get_context()
# get input dataset by name
test_dataset = run.input_datasets["test_data"]
lookback_dataset = run.input_datasets["lookback_data"]
grain_column_names = []
df = test_dataset.to_pandas_dataframe()
print("Read df")
print(df)
X_test_df = test_dataset.drop_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(
columns=[target_column_name]
)
X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(None).keep_columns(
columns=[target_column_name]
)
_, ext = os.path.splitext(model_path)
if ext == ".pt":
# Load the fc-tcn torch model.
    assert _torch_present, "PyTorch is required to load a .pt (TCN) model"
if torch.cuda.is_available():
map_location = map_location_cuda
else:
map_location = "cpu"
with open(model_path, "rb") as fh:
fitted_model = torch.load(fh, map_location=map_location)
else:
# Load the sklearn pipeline.
fitted_model = joblib.load(model_path)
if hasattr(fitted_model, "get_lookback"):
lookback = fitted_model.get_lookback()
df_all = do_rolling_forecast_with_lookback(
fitted_model,
X_test_df.to_pandas_dataframe(),
y_test_df.to_pandas_dataframe().values.T[0],
max_horizon,
X_lookback_df.to_pandas_dataframe()[-lookback:],
y_lookback_df.to_pandas_dataframe().values.T[0][-lookback:],
freq,
)
else:
df_all = do_rolling_forecast(
fitted_model,
X_test_df.to_pandas_dataframe(),
y_test_df.to_pandas_dataframe().values.T[0],
max_horizon,
freq,
)
print(df_all)
print("target values:::")
print(df_all[target_column_name])
print("predicted values:::")
print(df_all["predicted"])
# Use the AutoML scoring module
regression_metrics = list(constants.REGRESSION_SCALAR_SET)
y_test = np.array(df_all[target_column_name])
y_pred = np.array(df_all["predicted"])
scores = scoring.score_regression(y_test, y_pred, regression_metrics)
print("scores:")
print(scores)
for key, value in scores.items():
run.log(key, value)
print("Simple forecasting model")
rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all["predicted"]))
print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
mae = mean_absolute_error(df_all[target_column_name], df_all["predicted"])
print("mean_absolute_error score: %.2f" % mae)
print("MAPE: %.2f" % MAPE(df_all[target_column_name], df_all["predicted"]))
run.log("rmse", rmse)
run.log("mae", mae)

View File

@@ -0,0 +1,94 @@
---
page_type: sample
languages:
- python
products:
- azure-machine-learning
description: Tutorial showing how to solve complex machine learning time series forecasting problems at scale by using Azure Automated ML and the hierarchical time series accelerator.
---
## Microsoft Solution Accelerator: Hierarchical Time Series Forecasting
In most applications, customers need to understand their forecasts at both a macro and a micro level of the business, whether that means predicting sales of products at different geographic locations or understanding the expected workforce demand for different organizations at a company. The ability to train a machine learning model to intelligently forecast on hierarchical data is essential.
This business pattern is common across a wide variety of industries and applicable to many real-world use cases. Below are some examples of where the hierarchical time series pattern is useful.
| Industry | Scenario |
|----------------|--------------------------------------------|
| *Restaurant Chain* | Building demand forecasting models across thousands of restaurants and several countries. |
| *Retail Organization* | Building workforce optimization models for thousands of stores. |
| *Retail Organization*| Price optimization models for hundreds of thousands of products available. |
### Technical Summary
A hierarchical time series is a structure in which each unique series is arranged into a hierarchy based on dimensions such as geography or product type. The table below shows an example of data whose unique attributes form a hierarchy. Our hierarchy is defined by the `product type` (such as headphones or tablets), the `product category` (which splits product types into accessories and devices), and the `region` the products are sold in. The table below shows the first entry of each unique series in the hierarchy.
![data-table](./media/data-table.png)
To further visualize this, the leaf levels of the hierarchy contain all the time series with unique combinations of attribute values. Each higher level in the hierarchy considers one fewer dimension for defining the time series and aggregates each set of `child nodes` from the lower level into a `parent node`.
![hierachy-sample](./media/hierarchy-sample-ms.PNG)
> **Note:** If no unique root level exists in the data, Automated Machine Learning will create a node `automl_top_level` for users to train on or forecast totals.
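To make the aggregation concrete, here is a minimal pandas sketch (the column names and values are hypothetical, not taken from the accelerator's dataset):

```python
import pandas as pd

# Hypothetical leaf-level data: one row per (region, product_category, product_type) series
df = pd.DataFrame(
    {
        "region": ["East", "East", "West", "West"],
        "product_category": ["Accessory", "Device", "Accessory", "Device"],
        "product_type": ["Headphones", "Tablet", "Headphones", "Tablet"],
        "date": pd.to_datetime(["2020-01-01"] * 4),
        "quantity": [10, 4, 7, 5],
    }
)

# Each parent level drops one dimension and sums its child nodes
by_category = df.groupby(["region", "product_category", "date"])["quantity"].sum()
by_region = df.groupby(["region", "date"])["quantity"].sum()
total = df.groupby("date")["quantity"].sum()  # the root node of the hierarchy
```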
## Prerequisites
To use this solution accelerator, all you need is access to an [Azure subscription](https://azure.microsoft.com/free/) and an [Azure Machine Learning Workspace](https://docs.microsoft.com/azure/machine-learning/how-to-manage-workspace) that you'll create below.
A basic understanding of Azure Machine Learning and hierarchical time series concepts will be helpful for understanding the solution. The following resources can help introduce you to these concepts:
1. [Azure Machine Learning Overview](https://azure.microsoft.com/services/machine-learning/)
2. [Azure Machine Learning Tutorials](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup)
3. [Azure Machine Learning Sample Notebooks on Github](https://github.com/Azure/azureml-examples/)
4. [Forecasting: Principles and Practice, Hierarchical time series](https://otexts.com/fpp2/hts.html)
## Getting started
### 1. Set up the Compute Instance
Please create a [Compute Instance](https://docs.microsoft.com/en-us/azure/machine-learning/concept-compute-instance#create) and clone the git repo to your workspace.
### 2. Run the Notebook
Once your environment is set up, go to JupyterLab and run the notebook auto-ml-forecasting-hierarchical-timeseries.ipynb on the Compute Instance you created. It runs through the steps outlined sequentially. By the end, you'll know how to train, score, and make predictions using the hierarchical time series model pattern on Azure Machine Learning.
| Notebook | Description |
|----------------|--------------------------------------------|
| `auto-ml-forecasting-hierarchical-timeseries.ipynb`|Creates a pipeline to train machine learning models for the defined hierarchy and forecast at the desired hierarchy level using Automated ML. |
![Work Flow](./media/workflow.PNG)
## Key Concepts
### Automated Machine Learning
[Automated Machine Learning](https://docs.microsoft.com/azure/machine-learning/concept-automated-ml), also referred to as automated ML or AutoML, is the process of automating the time-consuming, iterative tasks of machine learning model development. It allows data scientists, analysts, and developers to build ML models with high scale, efficiency, and productivity, all while sustaining model quality.
### Pipelines
[Pipelines](https://docs.microsoft.com/azure/machine-learning/concept-ml-pipelines) allow you to create workflows in your machine learning projects. These workflows have a number of benefits including speed, simplicity, repeatability, and modularity.
### ParallelRunStep
[ParallelRunStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.parallel_run_step.parallelrunstep?view=azure-ml-py) enables the parallel training of models and is commonly used for batch inferencing. This [document](https://docs.microsoft.com/azure/machine-learning/how-to-use-parallel-run-step) walks through some of the key concepts around ParallelRunStep.
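For orientation, the sketch below shows what a bare ParallelRunStep looks like when wired up by hand; in this accelerator the steps are built for you by AutoMLPipelineBuilder, so treat the names used here (`scripts/score.py`, `env`, `input_ds`, `dstore`, `compute_target`) as placeholder assumptions rather than files in this repo:
```python
from azureml.pipeline.core import Pipeline, PipelineData
from azureml.pipeline.steps import ParallelRunConfig, ParallelRunStep

# Assumed to exist already: ws, env (an azureml.core.Environment),
# compute_target, dstore, and a tabular dataset input_ds.
parallel_run_config = ParallelRunConfig(
    source_directory="scripts",      # folder containing the entry script
    entry_script="score.py",         # must define init() and run(mini_batch)
    mini_batch_size="1MB",           # tabular data handed to each run() call
    error_threshold=10,              # failed records tolerated before aborting
    output_action="append_row",      # concatenate all run() outputs into one file
    environment=env,
    compute_target=compute_target,
    node_count=2,
    process_count_per_node=8,
)

parallel_step = ParallelRunStep(
    name="batch-scoring",
    parallel_run_config=parallel_run_config,
    inputs=[input_ds.as_named_input("input_ds")],
    output=PipelineData(name="scores", datastore=dstore),
)
pipeline = Pipeline(ws, steps=[parallel_step])
```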
### Other Concepts
In addition to ParallelRunStep, Pipelines, and Automated Machine Learning, you'll also work with the following concepts: [workspace](https://docs.microsoft.com/azure/machine-learning/concept-workspace), [datasets](https://docs.microsoft.com/azure/machine-learning/concept-data#datasets), [compute targets](https://docs.microsoft.com/azure/machine-learning/concept-compute-target#train), [Python script steps](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), and [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/).
## Contributing
This project welcomes contributions and suggestions. To learn more, visit the [contributing](CONTRIBUTING.md) section.
Most contributions require you to agree to a Contributor License Agreement (CLA)
declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

View File

@@ -0,0 +1,639 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Hierarchical Time Series - Automated ML\n",
"**_Generate hierarchical time series forecasts with Automated Machine Learning_**\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For this notebook we are using a synthetic dataset portraying sales data to predict the the quantity of a vartiety of product skus across several states, stores, and product categories.\n",
"\n",
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1.0 Set up workspace, datastore, experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003526897
}
},
"outputs": [],
"source": [
"import azureml.core\n",
"from azureml.core import Workspace, Datastore\n",
"import pandas as pd\n",
"\n",
"# Set up your workspace\n",
"ws = Workspace.from_config()\n",
"ws.get_details()\n",
"\n",
"# Set up your datastores\n",
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose an experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003540729
}
},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, \"automl-hts\")\n",
"\n",
"print(\"Experiment name: \" + experiment.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2.0 Data\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"nteract": {
"transient": {
"deleting": false
}
}
},
"source": [
"### Upload local csv files to datastore\n",
"You can upload your train and inference csv files to the default datastore in your workspace. \n",
"\n",
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore.datastore?view=azure-ml-py) documentation on how to access data from Datastore."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"datastore_path = \"hts-sample\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"datastore = ws.get_default_datastore()\n",
"datastore"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the TabularDatasets \n",
"\n",
"Datasets in Azure Machine Learning are references to specific data in a Datastore. The data can be retrieved as a [TabularDatasets](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py). We will read in the data as a pandas DataFrame, upload to the data store and register them to your Workspace using ```register_pandas_dataframe``` so they can be called as an input into the training pipeline. We will use the inference dataset as part of the forecasting pipeline. The step need only be completed once."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007017296
}
},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"registered_train = TabularDatasetFactory.register_pandas_dataframe(\n",
" pd.read_csv(\"Data/hts-sample-train.csv\"),\n",
" target=(datastore, \"hts-sample\"),\n",
" name=\"hts-sales-train\",\n",
")\n",
"registered_inference = TabularDatasetFactory.register_pandas_dataframe(\n",
" pd.read_csv(\"Data/hts-sample-test.csv\"),\n",
" target=(datastore, \"hts-sample\"),\n",
" name=\"hts-sales-test\",\n",
")"
]
},
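{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an optional sanity check, you can preview a few rows of the registered training dataset; `take` and `to_pandas_dataframe` are standard TabularDataset methods."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: preview the first rows of the registered training data.\n",
"registered_train.take(5).to_pandas_dataframe()"
]
},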
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3.0 Build the training pipeline\n",
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose a compute target\n",
"\n",
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
"\n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007037308
}
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"\n",
"# Name your cluster\n",
"compute_name = \"hts-compute\"\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print(\"Found compute target: \" + compute_name)\n",
"else:\n",
" print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n",
" )\n",
" # Create the compute target\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
" # If no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
" )\n",
"\n",
" # For a more detailed view of current cluster status, use the 'status' property\n",
" print(compute_target.status.serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up training parameters\n",
"\n",
"This dictionary defines the AutoML and hierarchy settings. For this forecasting task we need to define several settings inncluding the name of the time column, the maximum forecast horizon, the hierarchy definition, and the level of the hierarchy at which to train.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **hierarchy_column_names** | The names of columns that define the hierarchical structure of the data from highest level to most granular. |\n",
"| **training_level** | The level of the hierarchy to be used for training models. |\n",
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
"| **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
"| **model_explainability** | Flag to disable explaining the best automated ML model at the end of all training iterations. The default is True and will block non-explainable models which may impact the forecast accuracy. For more information, see [Interpretability: model explanations in automated machine learning](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-machine-learning-interpretability-automl). |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007061544
}
},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._hts.hts_parameters import HTSTrainParameters\n",
"\n",
"model_explainability = True\n",
"\n",
"engineered_explanations = False\n",
"# Define your hierarchy. Adjust the settings below based on your dataset.\n",
"hierarchy = [\"state\", \"store_id\", \"product_category\", \"SKU\"]\n",
"training_level = \"SKU\"\n",
"\n",
"# Set your forecast parameters. Adjust the settings below based on your dataset.\n",
"time_column_name = \"date\"\n",
"label_column_name = \"quantity\"\n",
"forecast_horizon = 7\n",
"\n",
"\n",
"automl_settings = {\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"label_column_name\": label_column_name,\n",
" \"time_column_name\": time_column_name,\n",
" \"forecast_horizon\": forecast_horizon,\n",
" \"hierarchy_column_names\": hierarchy,\n",
" \"hierarchy_training_level\": training_level,\n",
" \"track_child_runs\": False,\n",
" \"pipeline_fetch_max_batch_size\": 15,\n",
" \"model_explainability\": model_explainability,\n",
" # The following settings are specific to this sample and should be adjusted according to your own needs.\n",
" \"iteration_timeout_minutes\": 10,\n",
" \"iterations\": 10,\n",
" \"n_cross_validations\": 2,\n",
"}\n",
"\n",
"hts_parameters = HTSTrainParameters(\n",
" automl_settings=automl_settings,\n",
" hierarchy_column_names=hierarchy,\n",
" training_level=training_level,\n",
" enable_engineered_explanations=engineered_explanations,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up hierarchy training pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Parallel run step is leveraged to train the hierarchy. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The `process_count_per_node` is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
"\n",
"* **experiment:** The experiment used for training.\n",
"* **train_data:** The tabular dataset to be used as input to the training run.\n",
"* **node_count:** The number of compute nodes to be used for running the user script. We recommend starting with 3 and increasing the node_count if the training time is taking too long.\n",
"* **process_count_per_node:** Process count per node. We recommend a 2:1 ratio of number of cores to number of processes per node, e.g. if a node has 16 cores then configure a process count per node of 8 or less for optimal performance.\n",
"* **train_pipeline_parameters:** The set of configuration parameters defined in the previous section. \n",
"\n",
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"\n",
"\n",
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
" experiment=experiment,\n",
" train_data=registered_train,\n",
" compute_target=compute_target,\n",
" node_count=2,\n",
" process_count_per_node=8,\n",
" train_pipeline_parameters=hts_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit the pipeline to run\n",
"Next we submit our pipeline to run. The whole training pipeline takes about 1h using a Standard_D16_V3 VM with our current ParallelRunConfig setting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run = experiment.submit(training_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check the run status, if training_run is in completed state, continue to forecasting. If training_run is in another state, check the portal for failures."
]
},
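{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally, you can print the status directly; `get_status` is a standard method on run objects."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional quick check: 'Completed' means it is safe to move on.\n",
"print(training_run.get_status())"
]
},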
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### [Optional] Get the explanations\n",
"First we need to download the explanations to the local disk."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if model_explainability:\n",
" expl_output = training_run.get_pipeline_output(\"explanations\")\n",
" expl_output.download(\"training_explanations\")\n",
"else:\n",
" print(\n",
" \"Model explanations are available only if model_explainability is set to True.\"\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The explanations are downloaded to the \"training_explanations/azureml\" directory."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"if model_explainability:\n",
" explanations_dirrectory = os.listdir(\n",
" os.path.join(\"training_explanations\", \"azureml\")\n",
" )\n",
" if len(explanations_dirrectory) > 1:\n",
" print(\n",
" \"Warning! The directory contains multiple explanations, only the first one will be displayed.\"\n",
" )\n",
" print(\"The explanations are located at {}.\".format(explanations_dirrectory[0]))\n",
" # Now we will list all the explanations.\n",
" explanation_path = os.path.join(\n",
" \"training_explanations\",\n",
" \"azureml\",\n",
" explanations_dirrectory[0],\n",
" \"training_explanations\",\n",
" )\n",
" print(\"Available explanations\")\n",
" print(\"==============================\")\n",
" print(\"\\n\".join(os.listdir(explanation_path)))\n",
"else:\n",
" print(\n",
" \"Model explanations are available only if model_explainability is set to True.\"\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"View the explanations on \"state\" level."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import display\n",
"\n",
"explanation_type = \"raw\"\n",
"level = \"state\"\n",
"\n",
"if model_explainability:\n",
" display(\n",
" pd.read_csv(\n",
" os.path.join(explanation_path, \"{}_explanations_{}.csv\").format(\n",
" explanation_type, level\n",
" )\n",
" )\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5.0 Forecasting\n",
"For hierarchical forecasting we need to provide the HTSInferenceParameters object.\n",
"#### HTSInferenceParameters arguments\n",
"* **hierarchy_forecast_level:** The default level of the hierarchy to produce prediction/forecast on.\n",
"* **allocation_method:** \\[Optional] The disaggregation method to use if the hierarchy forecast level specified is below the define hierarchy training level. <br><i>(average historical proportions) 'average_historical_proportions'</i><br><i>(proportions of the historical averages) 'proportions_of_historical_average'</i>\n",
"\n",
"#### get_many_models_batch_inference_steps arguments\n",
"* **experiment:** The experiment used for inference run.\n",
"* **inference_data:** The data to use for inferencing. It should be the same schema as used for training.\n",
"* **compute_target:** The compute target that runs the inference pipeline.\n",
"* **node_count:** The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku).\n",
"* **process_count_per_node:** The number of processes per node.\n",
"* **train_run_id:** \\[Optional] The run id of the hierarchy training, by default it is the latest successful training hts run in the experiment.\n",
"* **train_experiment_name:** \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline.\n",
"* **process_count_per_node:** \\[Optional] The number of processes per node, by default it's 4."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._hts.hts_parameters import HTSInferenceParameters\n",
"\n",
"inference_parameters = HTSInferenceParameters(\n",
" hierarchy_forecast_level=\"store_id\", # The setting is specific to this dataset and should be changed based on your dataset.\n",
" allocation_method=\"proportions_of_historical_average\",\n",
")\n",
"\n",
"steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
" experiment=experiment,\n",
" inference_data=registered_inference,\n",
" compute_target=compute_target,\n",
" inference_pipeline_parameters=inference_parameters,\n",
" node_count=2,\n",
" process_count_per_node=8,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"inference_pipeline = Pipeline(ws, steps=steps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline)\n",
"inference_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieve results\n",
"\n",
"Forecast results can be retrieved through the following code. The prediction results summary and the actual predictions are downloaded in forecast_results folder"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"forecasts = inference_run.get_pipeline_output(\"forecasts\")\n",
"forecasts.download(\"forecast_results\")"
]
},
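{
"cell_type": "markdown",
"metadata": {},
"source": [
"The exact layout of the downloaded files can vary by SDK version, so it helps to list what landed in the forecast_results folder before loading anything; the walk below is plain Python and makes no assumptions about file names."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"# List the downloaded forecast files so they are easy to locate and load.\n",
"for root, _, files in os.walk(\"forecast_results\"):\n",
"    for f in files:\n",
"        print(os.path.join(root, f))"
]
},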
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Resbumit the Pipeline\n",
"\n",
"The inference pipeline can be submitted with different configurations."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(\n",
" inference_pipeline, pipeline_parameters={\"hierarchy_forecast_level\": \"state\"}\n",
")\n",
"inference_run.wait_for_completion(show_output=False)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"categories": [
"how-to-use-azureml",
"automated-machine-learning"
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-hierarchical-timeseries
dependencies:
- pip:
- azureml-sdk

(7 binary image files added; not shown in this diff)

View File

@@ -0,0 +1,3 @@
dependencies:
- pip:
- azureml-contrib-automl-pipeline-steps

View File

@@ -0,0 +1,122 @@
---
page_type: sample
languages:
- python
products:
- azure-machine-learning
description: Tutorial showing how to solve complex machine learning time series forecasting problems at scale by using Azure Automated ML and the Many Models solution accelerator.
---
![Many Models Solution Accelerator Banner](images/mmsa.png)
# Many Models Solution Accelerator
<!--
Guidelines on README format: https://review.docs.microsoft.com/help/onboard/admin/samples/concepts/readme-template?branch=master
Guidance on onboarding samples to docs.microsoft.com/samples: https://review.docs.microsoft.com/help/onboard/admin/samples/process/onboarding?branch=master
Taxonomies for products and languages: https://review.docs.microsoft.com/new-hope/information-architecture/metadata/taxonomies?branch=master
-->
In the real world, many problems can be too complex to be solved by a single machine learning model. Whether that be predicting sales for each individual store, building a predictive maintenance model for hundreds of oil wells, or tailoring an experience to individual users, building a model for each instance can lead to improved results on many machine learning problems.
This pattern is very common across a wide variety of industries and applicable to many real world use cases. Below are some examples we have seen where this pattern is being used.
- Energy and utility companies building predictive maintenance models for thousands of oil wells, hundreds of wind turbines, or hundreds of smart meters
- Retail organizations building workforce optimization models for thousands of stores, campaign promotion propensity models, and price optimization models for the hundreds of thousands of products they sell
- Restaurant chains building demand forecasting models across thousands of restaurants
- Banks and financial institutes building models for cash replenishment for ATM machines, or building personalized models for individuals
- Enterprises building revenue forecasting models at each division level
- Document management companies building text analytics and legal document search models for each state
Azure Machine Learning (AML) makes it easy to train, operate, and manage hundreds or even thousands of models. This repo will walk you through the end-to-end process of creating a many models solution, from training to scoring to monitoring.
## Prerequisites
To use this solution accelerator, all you need is access to an [Azure subscription](https://azure.microsoft.com/free/) and an [Azure Machine Learning Workspace](https://docs.microsoft.com/azure/machine-learning/how-to-manage-workspace) that you'll create below.
While it's not required, a basic understanding of Azure Machine Learning will be helpful for understanding the solution. The following resources can help introduce you to AML:
1. [Azure Machine Learning Overview](https://azure.microsoft.com/services/machine-learning/)
2. [Azure Machine Learning Tutorials](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup)
3. [Azure Machine Learning Sample Notebooks on Github](https://github.com/Azure/azureml-examples)
## Getting started
### 1. Deploy Resources
Start by deploying the resources to Azure. The button below will deploy Azure Machine Learning and its related resources:
<a href="https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2Fmicrosoft%2Fsolution-accelerator-many-models%2Fmaster%2Fazuredeploy.json" target="_blank">
<img src="http://azuredeploy.net/deploybutton.png"/>
</a>
### 2. Configure Development Environment
Next you'll need to configure your [development environment](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment) for Azure Machine Learning. We recommend using a [Compute Instance](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment#compute-instance) as it's the fastest way to get up and running.
### 3. Run Notebooks
Once your development environment is set up, run through the Jupyter Notebooks sequentially following the steps outlined. By the end, you'll know how to train, score, and make predictions using the many models pattern on Azure Machine Learning.
![Sequence of Notebooks](./images/mmsa-overview.png)
## Contents
In this repo, you'll train and score a forecasting model for each orange juice brand and for each store at a (simulated) grocery chain. By the end, you'll have forecasted sales by using up to 11,973 models to predict sales for the next few weeks.
The data used in this sample is simulated based on the [Dominick's Orange Juice Dataset](http://www.cs.unitn.it/~taufer/QMMA/L10-OJ-Data.html#(1)), sales data from a Chicago area grocery store.
<img src="images/Flow_map.png" width="1000">
### Using Automated ML to train the models:
The [`auto-ml-forecasting-many-models.ipynb`](./auto-ml-forecasting-many-models.ipynb) notebook is a guided solution accelerator that demonstrates the steps from data preparation to model training and forecasting on trained models, as well as operationalizing the solution.
## How-to-videos
Watch these how-to videos for a step-by-step walk-through of the many models solution accelerator to learn how to set up your models using Automated ML.
### Automated ML
[![Watch the video](https://media.giphy.com/media/dWUKfameudyNGRnp1t/giphy.gif)](https://channel9.msdn.com/Shows/Docs-AI/Building-Large-Scale-Machine-Learning-Forecasting-Models-using-Azure-Machine-Learnings-Automated-ML)
## Key concepts
### ParallelRunStep
[ParallelRunStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.parallel_run_step.parallelrunstep?view=azure-ml-py) enables the parallel training of models and is commonly used for batch inferencing. This [document](https://docs.microsoft.com/azure/machine-learning/how-to-use-parallel-run-step) walks through some of the key concepts around ParallelRunStep.
### Pipelines
[Pipelines](https://docs.microsoft.com/azure/machine-learning/concept-ml-pipelines) allow you to create workflows in your machine learning projects. These workflows have a number of benefits including speed, simplicity, repeatability, and modularity.
### Automated Machine Learning
[Automated Machine Learning](https://docs.microsoft.com/azure/machine-learning/concept-automated-ml), also referred to as automated ML or AutoML, is the process of automating the time-consuming, iterative tasks of machine learning model development. It allows data scientists, analysts, and developers to build ML models with high scale, efficiency, and productivity, all while sustaining model quality.
### Other Concepts
In addition to ParallelRunStep, Pipelines, and Automated Machine Learning, you'll also work with the following concepts: [workspace](https://docs.microsoft.com/azure/machine-learning/concept-workspace), [datasets](https://docs.microsoft.com/azure/machine-learning/concept-data#datasets), [compute targets](https://docs.microsoft.com/azure/machine-learning/concept-compute-target#train), [Python script steps](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), and [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/).
## Contributing
This project welcomes contributions and suggestions. To learn more, visit the [contributing](../../../CONTRIBUTING.md) section.
Most contributions require you to agree to a Contributor License Agreement (CLA)
declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

View File

@@ -0,0 +1,746 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Many Models - Automated ML\n",
"**_Generate many models time series forecasts with Automated Machine Learning_**\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For this notebook we are using a synthetic dataset portraying sales data to predict the the quantity of a vartiety of product skus across several states, stores, and product categories.\n",
"\n",
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1.0 Set up workspace, datastore, experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003526897
}
},
"outputs": [],
"source": [
"import azureml.core\n",
"from azureml.core import Workspace, Datastore\n",
"import pandas as pd\n",
"\n",
"# Set up your workspace\n",
"ws = Workspace.from_config()\n",
"ws.get_details()\n",
"\n",
"# Set up your datastores\n",
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose an experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003540729
}
},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, \"automl-many-models\")\n",
"\n",
"print(\"Experiment name: \" + experiment.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2.0 Data\n",
"\n",
"This notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. \n",
"\n",
"The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each thus allowing 11,973 models to be trained to showcase the power of the many models pattern.\n",
"\n",
" \n",
"In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:\n",
"\n",
"1. Registering the blob container as a Datastore to the Workspace\n",
"2. Registering a tabular dataset to the Workspace"
]
},
{
"cell_type": "markdown",
"metadata": {
"nteract": {
"transient": {
"deleting": false
}
}
},
"source": [
"### 2.1 Data Preparation\n",
"The OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on time column ('WeekStarting') before and after '1992-5-28' .\n",
"\n",
"The container has\n",
"<ol>\n",
" <li><b>'oj-data-tabular'</b> and <b>'oj-inference-tabular'</b> folders that contains training and inference data respectively for the 11,973 models. </li>\n",
" <li>It also has <b>'oj-data-small-tabular'</b> and <b>'oj-inference-small-tabular'</b> folders that has training and inference data for 10 models.</li>\n",
"</ol>\n",
"\n",
"To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace."
]
},
{
"cell_type": "markdown",
"metadata": {
"nteract": {
"transient": {
"deleting": false
}
}
},
"source": [
"<b> To use your own data, put your own data in a blobstore folder. As shown it can be one file or multiple files. We can then register datastore using that blob as shown below.\n",
" \n",
"<h3> How sample data in blob store looks like</h3>\n",
"\n",
"['oj-data-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)</b>\n",
"![image-4.png](mm-1.png)\n",
"\n",
"['oj-inference-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
"![image-3.png](mm-2.png)\n",
"\n",
"['oj-data-small-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
"\n",
"![image-5.png](mm-3.png)\n",
"\n",
"['oj-inference-small-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
"![image-6.png](mm-4.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 2.2 Register the blob container as DataStore\n",
"\n",
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
"\n",
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.\n",
"\n",
"In this next step, we will be registering blob storage as datastore to the Workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Datastore\n",
"\n",
"# Please change the following to point to your own blob container and pass in account_key\n",
"blob_datastore_name = \"automl_many_models\"\n",
"container_name = \"automl-sample-notebook-data\"\n",
"account_name = \"automlsamplenotebookdata\"\n",
"\n",
"oj_datastore = Datastore.register_azure_blob_container(\n",
" workspace=ws,\n",
" datastore_name=blob_datastore_name,\n",
" container_name=container_name,\n",
" account_name=account_name,\n",
" create_if_not_exists=True,\n",
")"
]
},
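{
"cell_type": "markdown",
"metadata": {},
"source": [
"The container above is public, so no credentials are required. If you point this at your own private container, pass `account_key` (or `sas_token`) to `Datastore.register_azure_blob_container`; both are standard parameters of that method. A sketch with a placeholder key:\n",
"\n",
"```python\n",
"oj_datastore = Datastore.register_azure_blob_container(\n",
"    workspace=ws,\n",
"    datastore_name=blob_datastore_name,\n",
"    container_name=container_name,\n",
"    account_name=account_name,\n",
"    account_key=\"<your-storage-account-key>\",  # placeholder for private containers\n",
"    create_if_not_exists=True,\n",
")\n",
"```"
]
},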
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 2.3 Using tabular datasets \n",
"\n",
"Now that the datastore is available from the Workspace, [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset, so that users who have their data which can be in one or many files (*.parquet or *.csv) and have not split up data according to group columns needed for training, can do so using out of box support for 'partiion_by' feature of TabularDataset shown in section 5.0 below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007017296
}
},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"ds_name_small = \"oj-data-small-tabular\"\n",
"input_ds_small = Dataset.Tabular.from_delimited_files(\n",
" path=oj_datastore.path(ds_name_small + \"/\"), validate=False\n",
")\n",
"\n",
"inference_name_small = \"oj-inference-small-tabular\"\n",
"inference_ds_small = Dataset.Tabular.from_delimited_files(\n",
" path=oj_datastore.path(inference_name_small + \"/\"), validate=False\n",
")"
]
},
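{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a sketch of the `partition_by` feature mentioned above (the destination folder name is an illustrative assumption), data that has not yet been split by the grouping columns can be partitioned and written back to the datastore like this:\n",
"\n",
"```python\n",
"# Write one partition per (Store, Brand) group back to the datastore.\n",
"partitioned_ds = input_ds_small.partition_by(\n",
"    partition_keys=[\"Store\", \"Brand\"],\n",
"    target=(oj_datastore, \"oj-partitioned\"),  # assumed destination folder\n",
")\n",
"```"
]
},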
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3.0 Build the training pipeline\n",
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose a compute target\n",
"\n",
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
"\n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007037308
}
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"\n",
"# Name your cluster\n",
"compute_name = \"mm-compute\"\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print(\"Found compute target: \" + compute_name)\n",
"else:\n",
" print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n",
" )\n",
" # Create the compute target\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
" # If no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
" )\n",
"\n",
" # For a more detailed view of current cluster status, use the 'status' property\n",
" print(compute_target.status.serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up training parameters\n",
"\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings inncluding the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
"| **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007061544
}
},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsTrainParameters,\n",
")\n",
"\n",
"partition_column_names = [\"Store\", \"Brand\"]\n",
"automl_settings = {\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
" \"iterations\": 15,\n",
" \"experiment_timeout_hours\": 0.25,\n",
" \"label_column_name\": \"Quantity\",\n",
" \"n_cross_validations\": 3,\n",
" \"time_column_name\": \"WeekStarting\",\n",
" \"drop_column_names\": \"Revenue\",\n",
" \"max_horizon\": 6,\n",
" \"grain_column_names\": partition_column_names,\n",
" \"track_child_runs\": False,\n",
"}\n",
"\n",
"mm_paramters = ManyModelsTrainParameters(\n",
" automl_settings=automl_settings, partition_column_names=partition_column_names\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up many models pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for training. |\n",
"| **train_data** | The tabular dataset to be used as input to the training run. |\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend starting with 3 and increasing the node_count if the training time is taking too long. |\n",
"| **process_count_per_node** | Process count per node. We recommend a 2:1 ratio of number of cores to number of processes per node, e.g. if a node has 16 cores then configure a process count per node of 8 or less for optimal performance. |\n",
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
"\n",
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"\n",
"\n",
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
" experiment=experiment,\n",
" train_data=input_ds_small,\n",
" compute_target=compute_target,\n",
" node_count=2,\n",
" process_count_per_node=8,\n",
" run_invocation_timeout=920,\n",
" train_pipeline_parameters=mm_paramters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit the pipeline to run\n",
"Next we submit our pipeline to run. The whole training pipeline takes about 40m using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run = experiment.submit(training_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check the run status, if training_run is in completed state, continue to forecasting. If training_run is in another state, check the portal for failures."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5.0 Publish and schedule the train pipeline (Optional)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 5.1 Publish the pipeline\n",
"\n",
"Once you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',\n",
"# description = 'train many models',\n",
"# version = '1',\n",
"# continue_on_step_failure = False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.2 Schedule the pipeline\n",
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
"\n",
"# training_pipeline_id = published_pipeline.id\n",
"\n",
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
"# recurring_schedule = Schedule.create(ws, name=\"automl_training_recurring_schedule\",\n",
"# description=\"Schedule Training Pipeline to run on the first day of every month\",\n",
"# pipeline_id=training_pipeline_id,\n",
"# experiment_name=experiment.name,\n",
"# recurrence=recurrence)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 6.0 Forecasting"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up output dataset for inference data\n",
"Output of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data import OutputFileDatasetConfig\n",
"\n",
"output_inference_data_ds = OutputFileDatasetConfig(\n",
" name=\"many_models_inference_output\", destination=(dstore, \"oj/inference_data/\")\n",
").register_on_complete(name=\"oj_inference_data_ds\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For many models we need to provide the ManyModelsInferenceParameters object.\n",
"\n",
"#### ManyModelsInferenceParameters arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **partition_column_names** | List of column names that identifies groups. |\n",
"| **target_column_name** | \\[Optional] Column name only if the inference dataset has the target. |\n",
"| **time_column_name** | \\[Optional] Column name only if it is timeseries. |\n",
"| **many_models_run_id** | \\[Optional] Many models run id where models were trained. |\n",
"\n",
"#### get_many_models_batch_inference_steps arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for inference run. |\n",
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
"| **compute_target** The compute target that runs the inference pipeline.|\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
"| **process_count_per_node** The number of processes per node.\n",
"| **train_run_id** | \\[Optional] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. |\n",
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
"| **process_count_per_node** | \\[Optional] The number of processes per node, by default it's 4. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsInferenceParameters,\n",
")\n",
"\n",
"mm_parameters = ManyModelsInferenceParameters(\n",
" partition_column_names=[\"Store\", \"Brand\"],\n",
" time_column_name=\"WeekStarting\",\n",
" target_column_name=\"Quantity\",\n",
")\n",
"\n",
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
" experiment=experiment,\n",
" inference_data=inference_ds_small,\n",
" node_count=2,\n",
" process_count_per_node=8,\n",
" compute_target=compute_target,\n",
" run_invocation_timeout=300,\n",
" output_datastore=output_inference_data_ds,\n",
" train_run_id=training_run.id,\n",
" train_experiment_name=training_run.experiment.name,\n",
" inference_pipeline_parameters=mm_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"inference_pipeline = Pipeline(ws, steps=inference_steps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline)\n",
"inference_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieve results\n",
"\n",
"The forecasting pipeline forecasts the orange juice quantity for a Store by Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container is listed in 'forecasting_output.txt' under Outputs+logs. \n",
"\n",
"The following code snippet:\n",
"1. Downloads the contents of the output folder that is passed in the parallel run step \n",
"2. Reads the parallel_run_step.txt file that has the predictions as pandas dataframe and \n",
"3. Displays the top 10 rows of the predictions"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
"\n",
"forecasting_results_name = \"forecasting_results\"\n",
"forecasting_output_name = \"many_models_inference_output\"\n",
"forecast_file = get_output_from_mm_pipeline(\n",
" inference_run, forecasting_results_name, forecasting_output_name\n",
")\n",
"df = pd.read_csv(forecast_file, delimiter=\" \", header=None)\n",
"df.columns = [\n",
" \"Week Starting\",\n",
" \"Store\",\n",
" \"Brand\",\n",
" \"Quantity\",\n",
" \"Advert\",\n",
" \"Price\",\n",
" \"Revenue\",\n",
" \"Predicted\",\n",
"]\n",
"print(\n",
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
")\n",
"df.head(10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 7.0 Publish and schedule the inference pipeline (Optional)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.1 Publish the pipeline\n",
"\n",
"Once you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',\n",
"# description = 'forecast many models',\n",
"# version = '1',\n",
"# continue_on_step_failure = False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.2 Schedule the pipeline\n",
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
"\n",
"# forecasting_pipeline_id = published_pipeline.id\n",
"\n",
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
"# recurring_schedule = Schedule.create(ws, name=\"automl_forecasting_recurring_schedule\",\n",
"# description=\"Schedule Forecasting Pipeline to run on the first day of every week\",\n",
"# pipeline_id=forecasting_pipeline_id,\n",
"# experiment_name=experiment.name,\n",
"# recurrence=recurrence)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"categories": [
"how-to-use-azureml",
"automated-machine-learning"
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-many-models
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,3 @@
dependencies:
- pip:
- azureml-contrib-automl-pipeline-steps

View File

@@ -5,62 +5,20 @@ compute instance.
"""
import argparse
import pandas as pd
import numpy as np
from azureml.core import Dataset, Run
from azureml.automl.core.shared.constants import TimeSeriesInternal
from sklearn.externals import joblib
from pandas.tseries.frequencies import to_offset
def align_outputs(y_predicted, X_trans, X_test, y_test, target_column_name,
predicted_column_name='predicted',
horizon_colname='horizon_origin'):
"""
Demonstrates how to get the output aligned to the inputs
using pandas indexes. Helps understand what happened if
the output's shape differs from the input shape, or if
the data got re-sorted by time and grain during forecasting.
Typical causes of misalignment are:
* we predicted some periods that were missing in actuals -> drop from eval
* model was asked to predict past max_horizon -> increase max horizon
* data at start of X_test was needed for lags -> provide previous periods
"""
if (horizon_colname in X_trans):
df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname]})
else:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
# y and X outputs are aligned by forecast() function contract
df_fcst.index = X_trans.index
# align original X_test to y_test
X_test_full = X_test.copy()
X_test_full[target_column_name] = y_test
# X_test_full's index does not include origin, so reset for merge
df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns='index')
together = df_fcst.merge(X_test_full, how='right')
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = together[together[[target_column_name,
predicted_column_name]].notnull().all(axis=1)]
return(clean)
parser = argparse.ArgumentParser()
parser.add_argument(
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)
args = parser.parse_args()
target_column_name = args.target_column_name
@@ -76,14 +34,28 @@ X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
y_test = X_test.pop(target_column_name).values
# generate forecast
fitted_model = joblib.load("model.pkl")
# Default quantile values: the median forecast plus a 95% prediction interval
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
file_name = "outputs/predictions.csv"
export_csv = clean.to_csv(file_name, header=True, index=False)  # added Index
# Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name)

View File

@@ -3,36 +3,47 @@ import shutil
from azureml.core import ScriptRunConfig
def run_remote_inference(
test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
# Create local directory to copy the model.pkl and forecasting_script.py files into.
# These files will be uploaded to and executed on the compute instance.
os.makedirs(inference_folder, exist_ok=True)
shutil.copy("forecasting_script.py", inference_folder)
train_run.download_file(
"outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
)
inference_env = train_run.get_environment()
config = ScriptRunConfig(
source_directory=inference_folder,
script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name,
"--test_dataset",
test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target,
environment=inference_env,
)
run = test_experiment.submit(
config,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags["run_algorithm"])
return run

View File

@@ -0,0 +1,494 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/1_determine_experiment_settings.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In this notebook we will explore the univaraite time-series data to determine the settings for an automated ML experiment. We will follow the thought process depicted in the following diagram:<br/>\n",
"![Forecasting after training](figures/univariate_settings_map_20210408.jpg)\n",
"\n",
"The objective is to answer the following questions:\n",
"\n",
"<ol>\n",
" <li>Is there a seasonal pattern in the data? </li>\n",
" <ul style=\"margin-top:-1px; list-style-type:none\"> \n",
" <li> Importance: If we are able to detect regular seasonal patterns, the forecast accuracy may be improved by extracting these patterns and including them as features into the model. </li>\n",
" </ul>\n",
" <li>Is the data stationary? </li>\n",
" <ul style=\"margin-top:-1px; list-style-type:none\"> \n",
" <li> Importance: In the absense of features that capture trend behavior, ML models (regression and tree based) are not well equiped to predict stochastic trends. Working with stationary data solves this problem. </li>\n",
" </ul>\n",
" <li>Is there a detectable auto-regressive pattern in the stationary data? </li>\n",
" <ul style=\"margin-top:-1px; list-style-type:none\"> \n",
" <li> Importance: The accuracy of ML models can be improved if serial correlation is modeled by including lags of the dependent/target varaible as features. Including target lags in every experiment by default will result in a regression in accuracy scores if such setting is not warranted. </li>\n",
" </ul>\n",
"</ol>\n",
"\n",
"The answers to these questions will help determine the appropriate settings for the automated ML experiment.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import warnings\n",
"import pandas as pd\n",
"\n",
"from statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n",
"import matplotlib.pyplot as plt\n",
"from pandas.plotting import register_matplotlib_converters\n",
"\n",
"register_matplotlib_converters() # fixes the future warning issue\n",
"\n",
"from helper_functions import unit_root_test_wrapper\n",
"from statsmodels.tools.sm_exceptions import InterpolationWarning\n",
"\n",
"warnings.simplefilter(\"ignore\", InterpolationWarning)\n",
"\n",
"\n",
"# set printing options\n",
"pd.set_option(\"display.max_columns\", 500)\n",
"pd.set_option(\"display.width\", 1000)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# load data\n",
"main_data_loc = \"data\"\n",
"train_file_name = \"S4248SM144SCEN.csv\"\n",
"\n",
"TARGET_COLNAME = \"S4248SM144SCEN\"\n",
"TIME_COLNAME = \"observation_date\"\n",
"COVID_PERIOD_START = \"2020-03-01\"\n",
"\n",
"df = pd.read_csv(os.path.join(main_data_loc, train_file_name))\n",
"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format=\"%Y-%m-%d\")\n",
"df.sort_values(by=TIME_COLNAME, inplace=True)\n",
"df.set_index(TIME_COLNAME, inplace=True)\n",
"df.head(2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# plot the entire dataset\n",
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
"ax.plot(df)\n",
"ax.title.set_text(\"Original Data Series\")\n",
"locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The graph plots the alcohol sales in the United States. Because the data is trending, it can be difficult to see cycles, seasonality or other interestng behaviors due to the scaling issues. For example, if there is a seasonal pattern, which we will discuss later, we cannot see them on the trending data. In such case, it is worth plotting the same data in first differences."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# plot the entire dataset in first differences\n",
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
"ax.plot(df.diff().dropna())\n",
"ax.title.set_text(\"Data in first differences\")\n",
"locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In the previous plot we observe that the data is more volatile towards the end of the series. This period coincides with the Covid-19 period, so we will exclude it from our experiment. Since in this example there are no user-provided features it is hard to make an argument that a model trained on the less volatile pre-covid data will be able to accurately predict the covid period."
]
},
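{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional check: rolling 12-period std of the first differences.\n",
"# The spike near the end of the sample corresponds to the Covid-19 period.\n",
"rolling_sd = df[TARGET_COLNAME].diff().rolling(window=12).std()\n",
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
"ax.plot(rolling_sd)\n",
"ax.title.set_text(\"Rolling 12-period std of first differences\")\n",
"locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)"
]
},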
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 1. Seasonality\n",
"\n",
"#### Questions that need to be answered in this section:\n",
"1. Is there a seasonality?\n",
"2. If it's seasonal, does the data exhibit a trend (up or down)?\n",
"\n",
"It is hard to visually detect seasonality when the data is trending. The reason being is scale of seasonal fluctuations is dwarfed by the range of the trend in the data. One way to deal with this is to de-trend the data by taking the first differences. We will discuss this in more detail in the next section."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# plot the entire dataset in first differences\n",
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
"ax.plot(df.diff().dropna())\n",
"ax.title.set_text(\"Data in first differences\")\n",
"locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For the next plot, we will exclude the Covid period again. We will also shorten the length of data because plotting a very long time series may prevent us from seeing seasonal patterns, if there are any, because the plot may look like a random walk."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# remove COVID period\n",
"df = df[:COVID_PERIOD_START]\n",
"\n",
"# plot the entire dataset in first differences\n",
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
"ax.plot(df[\"2015-01-01\":].diff().dropna())\n",
"ax.title.set_text(\"Data in first differences\")\n",
"locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<p style=\"font-size:150%; color:blue\"> Conclusion </p>\n",
"\n",
"Visual examination does not suggest clear seasonal patterns. We will set the STL_TYPE = None, and we will move to the next section that examines stationarity. \n",
"\n",
"\n",
"Say, we are working with a different data set that shows clear patterns of seasonality, we have several options for setting the settings:is hard to say which option will work best in your case, hence you will need to run both options to see which one results in more accurate forecasts. </li>\n",
"<ol>\n",
" <li> If the data does not appear to be trending, set DIFFERENCE_SERIES=False, TARGET_LAGS=None and STL_TYPE = \"season\" </li>\n",
" <li> If the data appears to be trending, consider one of the following two settings:\n",
" <ul>\n",
" <ol type=\"a\">\n",
" <li> DIFFERENCE_SERIES=True, TARGET_LAGS=None and STL_TYPE = \"season\", or </li>\n",
" <li> DIFFERENCE_SERIES=False, TARGET_LAGS=None and STL_TYPE = \"trend_season\" </li>\n",
" </ol>\n",
" <li> In the first case, by taking first differences we are removing stochastic trend, but we do not remove seasonal patterns. In the second case, we do not remove the stochastic trend and it can be captured by the trend component of the STL decomposition. It is hard to say which option will work best in your case, hence you will need to run both options to see which one results in more accurate forecasts. </li>\n",
" </ul>\n",
"</ol>"
]
},
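{
"cell_type": "markdown",
"metadata": {},
"source": [
"The cell below is an illustrative sketch of the two candidate settings combinations described above. It is not used for this data set, since we found no seasonal pattern; the parameter names mirror those set in the conclusion cells of this notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative only: candidate settings if the data HAD shown clear seasonality.\n",
"option_a = {\"DIFFERENCE_SERIES\": True, \"TARGET_LAGS\": None, \"STL_TYPE\": \"season\"}\n",
"option_b = {\"DIFFERENCE_SERIES\": False, \"TARGET_LAGS\": None, \"STL_TYPE\": \"trend_season\"}\n",
"print(option_a)\n",
"print(option_b)"
]
},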
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 2. Stationarity\n",
"If the data does not exhibit seasonal patterns, we would like to see if the data is non-stationary. Particularly, we want to see if there is a clear trending behavior. If such behavior is observed, we would like to first difference the data and examine the plot of an auto-correlation function (ACF) known as correlogram. If the data is seasonal, differencing it will not get rid off the seasonality and this will be shown on the correlogram as well.\n",
"\n",
"<ul>\n",
" <li> Question: What is stationarity and how to we detect it? </li>\n",
" <ul>\n",
" <li> This is a fairly complex topic. Please read the following <a href=\"https://otexts.com/fpp2/stationarity.html\"> link </a> for a high level discussion on this subject. </li>\n",
" <li> Simply put, we are looking for scenario when examining the time series plots the mean of the series is roughly the same, regardless which time interval you pick to compute it. Thus, trending and seasonal data are examples of non-stationary series. </li>\n",
" </ul>\n",
"</ul>\n",
"\n",
"\n",
"<ul>\n",
" <li> Question: Why do want to work with stationary data?</li>\n",
" <ul> \n",
" <li> In the absence of features that capture stochastic trends, the ML models that use (deterministic) time based features (hour of the day, day of the week, month of the year, etc) cannot capture such trends, and will over or under predict depending on the behavior of the time series. By working with stationary data, we eliminate the need to predict such trends, which improves the forecast accuracy. Classical time series models such as Arima and Exponential Smoothing handle non-stationary series by design and do not need such transformations. By differencing the data we are still able to run the same family of models. </li>\n",
" </ul>\n",
"</ul>\n",
"\n",
"#### Questions that need to be answered in this section:\n",
"<ol> \n",
" <li> Is the data stationary? </li>\n",
" <li> Does the stationarized data (either the original or the differenced series) exhibit a clear auto-regressive pattern?</li>\n",
"</ol>\n",
"\n",
"To answer the first question, we run a series of tests (we call them unit root tests)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# unit root tests\n",
"test = unit_root_test_wrapper(df[TARGET_COLNAME])\n",
"print(\"---------------\", \"\\n\")\n",
"print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n",
"print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n",
"print(\"---------------\", \"\\n\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In the previous cell, we ran a series of unit root tests. The summary table contains the following columns:\n",
"<ul> \n",
" <li> test_name is the name of the test.\n",
" <ul> \n",
" <li> ADF: Augmented Dickey-Fuller test </li>\n",
" <li> KPSS: Kwiatkowski-PhillipsSchmidtShin test </li>\n",
" <li> PP: Phillips-Perron test\n",
" <li> ADF GLS: Augmented Dickey-Fuller using generalized least squares method </li>\n",
" <li> AZ: Andrews-Zivot test </li>\n",
" </ul>\n",
" <li> statistic: test statistic </li>\n",
" <li> crit_val: critical value of the test statistic </li>\n",
" <li> p_val: p-value of the test statistic. If the p-val is less than 0.05, the null hypothesis is rejected. </li>\n",
" <li> stationary: is the series stationary based on the test result? </li>\n",
" <li> Null hypothesis: what is being tested. Notice, some test such as ADF and PP assume the process has a unit root and looks for evidence to reject this hypothesis. Other tests, ex.g: KPSS, assumes the process is stationary and looks for evidence to reject such claim.\n",
"</ul>\n",
"\n",
"Each of the tests shows that the original time series is non-stationary. The final decision is based on the majority rule. If, there is a split decision, the algorithm will claim it is stationary. We run a series of tests because each test by itself may not be accurate. In many cases when there are conflicting test results, the user needs to make determination if the series is stationary or not.\n",
"\n",
"Since we found the series to be non-stationary, we will difference it and then test if the differenced series is stationary."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# unit root tests\n",
"test = unit_root_test_wrapper(df[TARGET_COLNAME].diff().dropna())\n",
"print(\"---------------\", \"\\n\")\n",
"print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n",
"print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n",
"print(\"---------------\", \"\\n\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Four out of five tests show that the series in first differences is stationary. Notice that this decision is not unanimous. Next, let's plot the original series in first-differences to illustrate the difference between non-stationary (unit root) process vs the stationary one."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# plot original and stationary data\n",
"fig = plt.figure(figsize=(10, 10))\n",
"ax1 = fig.add_subplot(211)\n",
"ax1.plot(df[TARGET_COLNAME], \"-b\")\n",
"ax2 = fig.add_subplot(212)\n",
"ax2.plot(df[TARGET_COLNAME].diff().dropna(), \"-b\")\n",
"ax1.title.set_text(\"Original data\")\n",
"ax2.title.set_text(\"Data in first differences\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you were asked a question \"What is the mean of the series before and after 2008?\", for the series titled \"Original data\" the mean values will be significantly different. This implies that the first moment of the series (in this case, it is the mean) is time dependent, i.e., mean changes depending on the interval one is looking at. Thus, the series is deemed to be non-stationary. On the other hand, for the series titled \"Data in first differences\" the means for both periods are roughly the same. Hence, the first moment is time invariant; meaning it does not depend on the interval of time one is looking at. In this example it is easy to visually distinguish between stationary and non-stationary data. Often this distinction is not easy to make, therefore we rely on the statistical tests described above to help us make an informed decision. "
]
},
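{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Quick check: compare the mean of each series before and after 2008.\n",
"orig = df[TARGET_COLNAME]\n",
"diffed = orig.diff().dropna()\n",
"for name, s in [(\"Original data\", orig), (\"First differences\", diffed)]:\n",
" before, after = s[:\"2007-12-31\"].mean(), s[\"2008-01-01\":].mean()\n",
" print(\"{}: mean before 2008 = {:.1f}, mean after 2008 = {:.1f}\".format(name, before, after))"
]
},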
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<p style=\"font-size:150%; color:blue\"> Conclusion </p>\n",
"Since we found the original process to be non-stationary (contains unit root), we will have to model the data in first differences. As a result, we will set the DIFFERENCE_SERIES parameter to True."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 3 Check if there is a clear autoregressive pattern\n",
"We need to determine if we should include lags of the target variable as features in order to improve forecast accuracy. To do this, we will examine the ACF and partial ACF (PACF) plots of the stationary series. In our case, it is a series in first diffrences.\n",
"\n",
"<ul>\n",
" <li> Question: What is an Auto-regressive pattern? What are we looking for? </li>\n",
" <ul style=\"list-style-type:none;\">\n",
" <li> We are looking for a classical profiles for an AR(p) process such as an exponential decay of an ACF and a the first $p$ significant lags of the PACF. For a more detailed explanation of ACF and PACF please refer to the appendix at the end of this notebook. For illustration purposes, let's examine the ACF/PACF profiles of the simulated data that follows a second order auto-regressive process, abbreviated as an AR(2). <li/>\n",
" <li><img src=\"figures/ACF_PACF_for_AR2.png\" class=\"img_class\">\n",
" <br/>\n",
" The lag order is on the x-axis while the auto- and partial-correlation coefficients are on the y-axis. Vertical lines that are outside the shaded area represent statistically significant lags. Notice, the ACF function decays to zero and the PACF shows 2 significant spikes (we ignore the first spike for lag 0 in both plots since the linear relationship of any series with itself is always 1). <li/>\n",
" </ul>\n",
"<ul/>"
]
},
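{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: simulate an AR(2) process and plot its ACF/PACF profile.\n",
"# The coefficients 0.6 and 0.3 are arbitrary stationary choices for illustration,\n",
"# not the ones used to generate the figure above.\n",
"import numpy as np\n",
"from statsmodels.tsa.arima_process import ArmaProcess\n",
"\n",
"np.random.seed(42)\n",
"ar2 = ArmaProcess(ar=np.array([1, -0.6, -0.3]), ma=np.array([1]))\n",
"sim = ar2.generate_sample(nsample=500)\n",
"\n",
"fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n",
"plot_acf(sim, ax=ax[0])\n",
"plot_pacf(sim, ax=ax[1])\n",
"plt.show()"
]
},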
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<ul>\n",
" <li> Question: What do I do if I observe an auto-regressive behavior? </li>\n",
" <ul style=\"list-style-type:none;\">\n",
" <li> If such behavior is observed, we might improve the forecast accuracy by enabling the target lags feature in AutoML. There are a few options of doing this </li>\n",
" <ol>\n",
" <li> Set the target lags parameter to 'auto', or </li>\n",
" <li> Specify the list of lags you want to include. Ex.g: target_lags = [1,2,5] </li>\n",
" </ol>\n",
" </ul>\n",
" <br/>\n",
" <li> Next, let's examine the ACF and PACF plots of the stationary target variable (depicted below). Here, we do not see a decay in the ACF, instead we see a decay in PACF. It is hard to make an argument the the target variable exhibits auto-regressive behavior. </li>\n",
" </ul>"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Plot the ACF/PACF for the series in differences\n",
"fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n",
"plot_acf(df[TARGET_COLNAME].diff().dropna().values.squeeze(), ax=ax[0])\n",
"plot_pacf(df[TARGET_COLNAME].diff().dropna().values.squeeze(), ax=ax[1])\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<p style=\"font-size:150%; color:blue\"> Conclusion </p>\n",
"Since we do not see a clear indication of an AR(p) process, we will not be using target lags and will set the TARGET_LAGS parameter to None."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<p style=\"font-size:150%; color:blue; font-weight: bold\"> AutoML Experiment Settings </p>\n",
"Based on the analysis performed, we should try the following settings for the AutoML experiment and use them in the \"2_run_experiment\" notebook.\n",
"<ul>\n",
" <li> STL_TYPE=None </li>\n",
" <li> DIFFERENCE_SERIES=True </li>\n",
" <li> TARGET_LAGS=None </li>\n",
"</ul>"
]
},
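{
"cell_type": "markdown",
"metadata": {},
"source": [
"For convenience, the cell below records these conclusions as Python constants; it mirrors the parameter cell at the top of the \"2_run_experiment\" notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Settings implied by the analysis above (mirrors the 2_run_experiment notebook).\n",
"STL_TYPE = None\n",
"DIFFERENCE_SERIES = True\n",
"TARGET_LAGS = None\n",
"print(\"STL_TYPE={}, DIFFERENCE_SERIES={}, TARGET_LAGS={}\".format(STL_TYPE, DIFFERENCE_SERIES, TARGET_LAGS))"
]
},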
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Appendix: ACF, PACF and Lag Selection\n",
"To do this, we will examine the ACF and partial ACF (PACF) plots of the differenced series. \n",
"\n",
"<ul>\n",
" <li> Question: What is the ACF? </li>\n",
" <ul style=\"list-style-type:none;\">\n",
" <li> To understand the ACF, first let's look at the correlation coefficient $\\rho_{xz}$\n",
" \\begin{equation}\n",
" \\rho_{xz} = \\frac{\\sigma_{xz}}{\\sigma_{x} \\sigma_{zy}}\n",
" \\end{equation}\n",
" </li>\n",
" where $\\sigma_{xzy}$ is the covariance between two random variables $X$ and $Z$; $\\sigma_x$ and $\\sigma_z$ is the variance for $X$ and $Z$, respectively. The correlation coefficient measures the strength of linear relationship between two random variables. This metric can take any value from -1 to 1. <li/>\n",
" <br/>\n",
" <li> The auto-correlation coefficient $\\rho_{Y_{t} Y_{t-k}}$ is the time series equivalent of the correlation coefficient, except instead of measuring linear association between two random variables $X$ and $Z$, it measures the strength of a linear relationship between a random variable $Y_t$ and its lag $Y_{t-k}$ for any positive interger value of $k$. </li> \n",
" <br />\n",
" <li> To visualize the ACF for a particular lag, say lag 2, plot the second lag of a series $y_{t-2}$ on the x-axis, and plot the series itself $y_t$ on the y-axis. The autocorrelation coefficient is the slope of the best fitted regression line and can be interpreted as follows. A one unit increase in the lag of a variable one period ago leads to a $\\rho_{Y_{t} Y_{t-2}}$ units change in the variable in the current period. This interpreation can be applied to any lag. </li> \n",
" <br />\n",
" <li> In the interpretation posted above we need to be careful not to confuse the word \"leads\" with \"causes\" since these are not the same thing. We do not know the lagged value of the varaible causes it to change. Afterall, there are probably many other features that may explain the movement in $Y_t$. All we are trying to do in this section is to identify situations when the variable contains the strong auto-regressive components that needs to be included in the model to improve forecast accuracy. </li>\n",
" </ul>\n",
"</ul>"
]
},
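{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a small worked example of the interpretation above, the cell below computes the lag-2 autocorrelation of the differenced target series with pandas; it assumes only the `df` and `TARGET_COLNAME` objects defined earlier."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Worked example: lag-2 autocorrelation of the differenced target series.\n",
"s = df[TARGET_COLNAME].diff().dropna()\n",
"print(\"lag-2 autocorrelation: {:.3f}\".format(s.autocorr(lag=2)))"
]
},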
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<ul>\n",
" <li> Question: What is the PACF? </li>\n",
" <ul style=\"list-style-type:none;\">\n",
" <li> When describing the ACF we essentially running a regression between a partigular lag of a series, say, lag 4, and the series itself. What this implies is the regression coefficient for lag 4 captures the impact of everything that happens in lags 1, 2 and 3. In other words, if lag 1 is the most important lag and we exclude it from the regression, naturally, the regression model will assign the importance of the 1st lag to the 4th one. Partial auto-correlation function fixes this problem since it measures the contribution of each lag accounting for the information added by the intermediary lags. If we were to illustrate ACF and PACF for the fourth lag using the regression analogy, the difference is a follows: \n",
" \\begin{align}\n",
" Y_{t} &= a_{0} + a_{4} Y_{t-4} + e_{t} \\\\\n",
" Y_{t} &= b_{0} + b_{1} Y_{t-1} + b_{2} Y_{t-2} + b_{3} Y_{t-3} + b_{4} Y_{t-4} + \\varepsilon_{t} \\\\\n",
" \\end{align}\n",
" </li>\n",
" <br/>\n",
" <li>\n",
" Here, you can think of $a_4$ and $b_{4}$ as the auto- and partial auto-correlation coefficients for lag 4. Notice, in the second equation we explicitely accounting for the intermediate lags by adding them as regrerssors.\n",
" </li>\n",
" </ul>\n",
"</ul>"
]
},
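{
"cell_type": "markdown",
"metadata": {},
"source": [
"To make the ACF/PACF distinction concrete, the sketch below computes both coefficients at lag 4 for the differenced series using statsmodels; the choice of lag 4 simply matches the example above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Compare the ACF and PACF coefficients at lag 4 for the differenced series.\n",
"from statsmodels.tsa.stattools import acf, pacf\n",
"\n",
"s = df[TARGET_COLNAME].diff().dropna()\n",
"print(\"ACF at lag 4: {:.3f}\".format(acf(s, nlags=4, fft=False)[4]))\n",
"print(\"PACF at lag 4: {:.3f}\".format(pacf(s, nlags=4)[4]))"
]
},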
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<ul>\n",
" <li> Question: Auto-regressive pattern? What are we looking for? </li>\n",
" <ul style=\"list-style-type:none;\">\n",
" <li> We are looking for a classical profiles for an AR(p) process such as an exponential decay of an ACF and a the first $p$ significant lags of the PACF. Let's examine the ACF/PACF profiles of the same simulated AR(2) shown in Section 3, and check if the ACF/PACF explanation are refelcted in these plots. <li/>\n",
" <li><img src=\"figures/ACF_PACF_for_AR2.png\" class=\"img_class\">\n",
" <li> The autocorrelation coefficient for the 3rd lag is 0.6, which can be interpreted that a one unit increase in the value of the target varaible three periods ago leads to 0.6 units increase in the current period. However, the PACF plot shows that the partial autocorrealtion coefficient is zero (from a statistical point of view since it lies within the shaded region). This is happening because the 1st and 2nd lags are good predictors of the target variable. Ommiting these two lags from the regression results in the misleading conclusion that the third lag is a good prediciton. <li/>\n",
" <br/>\n",
" <li> This is why it is important to examine both the ACF and the PACF plots when tring to determine the auto regressive order for the variable in question. <li/>\n",
" </ul>\n",
"</ul> "
]
}
],
"metadata": {
"authors": [
{
"name": "vlbejan"
}
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-univariate-recipe-experiment-settings
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,593 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/2_run_experiment.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Running AutoML experiments\n",
"\n",
"See the `auto-ml-forecasting-univariate-recipe-experiment-settings` notebook on how to determine settings for seasonal features, target lags and whether the series needs to be differenced or not. To make experimentation user-friendly, the user has to specify several parameters: DIFFERENCE_SERIES, TARGET_LAGS and STL_TYPE. Once these parameters are set, the notebook will generate correct transformations and settings to run experiments, generate forecasts, compute inference set metrics and plot forecast vs actuals. It will also convert the forecast from first differences to levels (original units of measurement) if the DIFFERENCE_SERIES parameter is set to True before calculating inference set metrics.\n",
"\n",
"<br/>\n",
"\n",
"The output generated by this notebook is saved in the `experiment_output`folder."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import logging\n",
"import pandas as pd\n",
"import numpy as np\n",
"\n",
"import azureml.automl.runtime\n",
"from azureml.core.compute import AmlCompute\n",
"from azureml.core.compute import ComputeTarget\n",
"import matplotlib.pyplot as plt\n",
"from helper_functions import ts_train_test_split, compute_metrics\n",
"\n",
"import azureml.core\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.train.automl import AutoMLConfig\n",
"\n",
"\n",
"# set printing options\n",
"np.set_printoptions(precision=4, suppress=True, linewidth=100)\n",
"pd.set_option(\"display.max_columns\", 500)\n",
"pd.set_option(\"display.width\", 1000)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created a **Workspace**. You will also need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"amlcompute_cluster_name = \"recipe-cluster\"\n",
"\n",
"found = False\n",
"# Check if this compute target already exists in the workspace.\n",
"cts = ws.compute_targets\n",
"if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == \"AmlCompute\":\n",
" found = True\n",
" print(\"Found existing compute target.\")\n",
" compute_target = cts[amlcompute_cluster_name]\n",
"\n",
"if not found:\n",
" print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_D2_V2\", max_nodes=6\n",
" )\n",
"\n",
" # Create the cluster.\\n\",\n",
" compute_target = ComputeTarget.create(\n",
" ws, amlcompute_cluster_name, provisioning_config\n",
" )\n",
"\n",
"print(\"Checking cluster status...\")\n",
"# Can poll for a minimum number of nodes and for a specific timeout.\n",
"# If no min_node_count is provided, it will use the scale settings for the cluster.\n",
"compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Data\n",
"\n",
"Here, we will load the data from the csv file and drop the Covid period."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"main_data_loc = \"data\"\n",
"train_file_name = \"S4248SM144SCEN.csv\"\n",
"\n",
"TARGET_COLNAME = \"S4248SM144SCEN\"\n",
"TIME_COLNAME = \"observation_date\"\n",
"COVID_PERIOD_START = (\n",
" \"2020-03-01\" # start of the covid period. To be excluded from evaluation.\n",
")\n",
"\n",
"# load data\n",
"df = pd.read_csv(os.path.join(main_data_loc, train_file_name))\n",
"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format=\"%Y-%m-%d\")\n",
"df.sort_values(by=TIME_COLNAME, inplace=True)\n",
"\n",
"# remove the Covid period\n",
"df = df.query('{} <= \"{}\"'.format(TIME_COLNAME, COVID_PERIOD_START))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set parameters\n",
"\n",
"The first set of parameters is based on the analysis performed in the `auto-ml-forecasting-univariate-recipe-experiment-settings` notebook. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# set parameters based on the settings notebook analysis\n",
"DIFFERENCE_SERIES = True\n",
"TARGET_LAGS = None\n",
"STL_TYPE = None"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Next, define additional parameters to be used in the <a href=\"https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.automlconfig?view=azure-ml-py\"> AutoML config </a> class.\n",
"\n",
"<ul> \n",
" <li> FORECAST_HORIZON: The forecast horizon is the number of periods into the future that the model should predict. Here, we set the horizon to 12 periods (i.e. 12 quarters). For more discussion of forecast horizons and guiding principles for setting them, please see the <a href=\"https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand\"> energy demand notebook </a>. \n",
" </li>\n",
" <li> TIME_SERIES_ID_COLNAMES: The names of columns used to group a timeseries. It can be used to create multiple series. If time series identifier is not defined, the data set is assumed to be one time-series. This parameter is used with task type forecasting. Since we are working with a single series, this list is empty.\n",
" </li>\n",
" <li> BLOCKED_MODELS: Optional list of models to be blocked from consideration during model selection stage. At this point we want to consider all ML and Time Series models.\n",
" <ul>\n",
" <li> See the following <a href=\"https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.constants.supportedmodels.forecasting?view=azure-ml-py\"> link </a> for a list of supported Forecasting models</li>\n",
" </ul>\n",
" </li>\n",
"</ul>\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# set other parameters\n",
"FORECAST_HORIZON = 12\n",
"TIME_SERIES_ID_COLNAMES = []\n",
"BLOCKED_MODELS = []"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To run AutoML, you also need to create an **Experiment**. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# choose a name for the run history container in the workspace\n",
"if isinstance(TARGET_LAGS, list):\n",
" TARGET_LAGS_STR = (\n",
" \"-\".join(map(str, TARGET_LAGS)) if (len(TARGET_LAGS) > 0) else None\n",
" )\n",
"else:\n",
" TARGET_LAGS_STR = TARGET_LAGS\n",
"\n",
"experiment_desc = \"diff-{}_lags-{}_STL-{}\".format(\n",
" DIFFERENCE_SERIES, TARGET_LAGS_STR, STL_TYPE\n",
")\n",
"experiment_name = \"alcohol_{}\".format(experiment_desc)\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"SKU\"] = ws.sku\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"print(outputDf.T)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# create output directory\n",
"output_dir = \"experiment_output/{}\".format(experiment_desc)\n",
"if not os.path.exists(output_dir):\n",
" os.makedirs(output_dir)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# difference data and test for unit root\n",
"if DIFFERENCE_SERIES:\n",
" df_delta = df.copy()\n",
" df_delta[TARGET_COLNAME] = df[TARGET_COLNAME].diff()\n",
" df_delta.dropna(axis=0, inplace=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# split the data into train and test set\n",
"if DIFFERENCE_SERIES:\n",
" # generate train/inference sets using data in first differences\n",
" df_train, df_test = ts_train_test_split(\n",
" df_input=df_delta,\n",
" n=FORECAST_HORIZON,\n",
" time_colname=TIME_COLNAME,\n",
" ts_id_colnames=TIME_SERIES_ID_COLNAMES,\n",
" )\n",
"else:\n",
" df_train, df_test = ts_train_test_split(\n",
" df_input=df,\n",
" n=FORECAST_HORIZON,\n",
" time_colname=TIME_COLNAME,\n",
" ts_id_colnames=TIME_SERIES_ID_COLNAMES,\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Upload files to the Datastore\n",
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace) is paired with the storage account, which contains the default data store. We will use it to upload the bike share data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_train.to_csv(\"train.csv\", index=False)\n",
"df_test.to_csv(\"test.csv\", index=False)\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"datastore.upload_files(\n",
" files=[\"./train.csv\"],\n",
" target_path=\"uni-recipe-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./test.csv\"],\n",
" target_path=\"uni-recipe-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"\n",
"from azureml.core import Dataset\n",
"\n",
"train_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"uni-recipe-dataset/tabular/train.csv\")]\n",
")\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"uni-recipe-dataset/tabular/test.csv\")]\n",
")\n",
"\n",
"# print the first 5 rows of the Dataset\n",
"train_dataset.to_pandas_dataframe().reset_index(drop=True).head(5)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Config AutoML"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"time_series_settings = {\n",
" \"time_column_name\": TIME_COLNAME,\n",
" \"forecast_horizon\": FORECAST_HORIZON,\n",
" \"target_lags\": TARGET_LAGS,\n",
" \"use_stl\": STL_TYPE,\n",
" \"blocked_models\": BLOCKED_MODELS,\n",
" \"time_series_id_column_names\": TIME_SERIES_ID_COLNAMES,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" debug_log=\"sample_experiment.log\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" experiment_timeout_minutes=20,\n",
" iteration_timeout_minutes=5,\n",
" enable_early_stopping=True,\n",
" training_data=train_dataset,\n",
" label_column_name=TARGET_COLNAME,\n",
" n_cross_validations=5,\n",
" verbosity=logging.INFO,\n",
" max_cores_per_iteration=-1,\n",
" compute_target=compute_target,\n",
" **time_series_settings,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We will now run the experiment, you can go to Azure ML portal to view the run details."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run = experiment.submit(automl_config, show_output=False)\n",
"remote_run.wait_for_completion()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Run details\n",
"Below we retrieve the best Run object from among all the runs in the experiment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run = remote_run.get_best_child()\n",
"best_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Inference\n",
"\n",
"We now use the best fitted model from the AutoML Run to make forecasts for the test set. We will do batch scoring on the test dataset which should have the same schema as training dataset.\n",
"\n",
"The inference will run on a remote compute. In this example, it will re-use the training compute."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_experiment = Experiment(ws, experiment_name + \"_inference\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retreiving forecasts from the model\n",
"We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from run_forecast import run_remote_inference\n",
"\n",
"remote_run = run_remote_inference(\n",
" test_experiment=test_experiment,\n",
" compute_target=compute_target,\n",
" train_run=best_run,\n",
" test_dataset=test_dataset,\n",
" target_column_name=TARGET_COLNAME,\n",
")\n",
"remote_run.wait_for_completion(show_output=False)\n",
"\n",
"remote_run.download_file(\"outputs/predictions.csv\", f\"{output_dir}/predictions.csv\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Download the prediction result for metrics calcuation\n",
"The test data with predictions are saved in artifact `outputs/predictions.csv`. We will use it to calculate accuracy metrics and vizualize predictions versus actuals."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_trans = pd.read_csv(f\"{output_dir}/predictions.csv\", parse_dates=[TIME_COLNAME])\n",
"X_trans.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# convert forecast in differences to levels\n",
"def convert_fcst_diff_to_levels(fcst, yt, df_orig):\n",
" \"\"\"Convert forecast from first differences to levels.\"\"\"\n",
" fcst = fcst.reset_index(drop=False, inplace=False)\n",
" fcst[\"predicted_level\"] = fcst[\"predicted\"].cumsum()\n",
" fcst[\"predicted_level\"] = fcst[\"predicted_level\"].astype(float) + float(yt)\n",
" # merge actuals\n",
" out = pd.merge(\n",
" fcst, df_orig[[TIME_COLNAME, TARGET_COLNAME]], on=[TIME_COLNAME], how=\"inner\"\n",
" )\n",
" out.rename(columns={TARGET_COLNAME: \"actual_level\"}, inplace=True)\n",
" return out"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if DIFFERENCE_SERIES:\n",
" # convert forecast in differences to the levels\n",
" INFORMATION_SET_DATE = max(df_train[TIME_COLNAME])\n",
" YT = df.query(\"{} == @INFORMATION_SET_DATE\".format(TIME_COLNAME))[TARGET_COLNAME]\n",
"\n",
" fcst_df = convert_fcst_diff_to_levels(fcst=X_trans, yt=YT, df_orig=df)\n",
"else:\n",
" fcst_df = X_trans.copy()\n",
" fcst_df[\"actual_level\"] = y_test\n",
" fcst_df[\"predicted_level\"] = y_predictions\n",
"\n",
"del X_trans"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Calculate metrics and save output"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# compute metrics\n",
"metrics_df = compute_metrics(fcst_df=fcst_df, metric_name=None, ts_id_colnames=None)\n",
"# save output\n",
"metrics_file_name = \"{}_metrics.csv\".format(experiment_name)\n",
"fcst_file_name = \"{}_forecst.csv\".format(experiment_name)\n",
"plot_file_name = \"{}_plot.pdf\".format(experiment_name)\n",
"\n",
"metrics_df.to_csv(os.path.join(output_dir, metrics_file_name), index=True)\n",
"fcst_df.to_csv(os.path.join(output_dir, fcst_file_name), index=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Generate and save visuals"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plot_df = df.query('{} > \"2010-01-01\"'.format(TIME_COLNAME))\n",
"plot_df.set_index(TIME_COLNAME, inplace=True)\n",
"fcst_df.set_index(TIME_COLNAME, inplace=True)\n",
"\n",
"# generate and save plots\n",
"fig, ax = plt.subplots(dpi=180)\n",
"ax.plot(plot_df[TARGET_COLNAME], \"-g\", label=\"Historical\")\n",
"ax.plot(fcst_df[\"actual_level\"], \"-b\", label=\"Actual\")\n",
"ax.plot(fcst_df[\"predicted_level\"], \"-r\", label=\"Forecast\")\n",
"ax.legend()\n",
"ax.set_title(\"Forecast vs Actuals\")\n",
"ax.set_xlabel(TIME_COLNAME)\n",
"ax.set_ylabel(TARGET_COLNAME)\n",
"locs, labels = plt.xticks()\n",
"\n",
"plt.setp(labels, rotation=45)\n",
"plt.savefig(os.path.join(output_dir, plot_file_name))"
]
}
],
"metadata": {
"authors": [
{
"name": "vlbejan"
}
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-univariate-recipe-run-experiment
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,350 @@
observation_date,S4248SM144SCEN
1992-01-01,4302
1992-02-01,4323
1992-03-01,4199
1992-04-01,4397
1992-05-01,4159
1992-06-01,4091
1992-07-01,4109
1992-08-01,4116
1992-09-01,4093
1992-10-01,4095
1992-11-01,4169
1992-12-01,4169
1993-01-01,4124
1993-02-01,4107
1993-03-01,4168
1993-04-01,4254
1993-05-01,4290
1993-06-01,4163
1993-07-01,4274
1993-08-01,4253
1993-09-01,4312
1993-10-01,4296
1993-11-01,4221
1993-12-01,4233
1994-01-01,4218
1994-02-01,4237
1994-03-01,4343
1994-04-01,4357
1994-05-01,4264
1994-06-01,4392
1994-07-01,4381
1994-08-01,4290
1994-09-01,4348
1994-10-01,4357
1994-11-01,4417
1994-12-01,4411
1995-01-01,4417
1995-02-01,4339
1995-03-01,4256
1995-04-01,4276
1995-05-01,4290
1995-06-01,4413
1995-07-01,4305
1995-08-01,4476
1995-09-01,4393
1995-10-01,4447
1995-11-01,4492
1995-12-01,4489
1996-01-01,4635
1996-02-01,4697
1996-03-01,4588
1996-04-01,4633
1996-05-01,4685
1996-06-01,4672
1996-07-01,4666
1996-08-01,4726
1996-09-01,4571
1996-10-01,4624
1996-11-01,4691
1996-12-01,4604
1997-01-01,4657
1997-02-01,4711
1997-03-01,4810
1997-04-01,4626
1997-05-01,4860
1997-06-01,4757
1997-07-01,4916
1997-08-01,4921
1997-09-01,4985
1997-10-01,4905
1997-11-01,4880
1997-12-01,5165
1998-01-01,4885
1998-02-01,4925
1998-03-01,5049
1998-04-01,5090
1998-05-01,5094
1998-06-01,4929
1998-07-01,5132
1998-08-01,5061
1998-09-01,5471
1998-10-01,5327
1998-11-01,5257
1998-12-01,5354
1999-01-01,5427
1999-02-01,5415
1999-03-01,5387
1999-04-01,5483
1999-05-01,5510
1999-06-01,5539
1999-07-01,5532
1999-08-01,5625
1999-09-01,5799
1999-10-01,5843
1999-11-01,5836
1999-12-01,5724
2000-01-01,5757
2000-02-01,5731
2000-03-01,5839
2000-04-01,5825
2000-05-01,5877
2000-06-01,5979
2000-07-01,5828
2000-08-01,6016
2000-09-01,6113
2000-10-01,6150
2000-11-01,6111
2000-12-01,6088
2001-01-01,6360
2001-02-01,6300
2001-03-01,5935
2001-04-01,6204
2001-05-01,6164
2001-06-01,6231
2001-07-01,6336
2001-08-01,6179
2001-09-01,6120
2001-10-01,6134
2001-11-01,6381
2001-12-01,6521
2002-01-01,6333
2002-02-01,6541
2002-03-01,6692
2002-04-01,6591
2002-05-01,6554
2002-06-01,6596
2002-07-01,6620
2002-08-01,6577
2002-09-01,6625
2002-10-01,6441
2002-11-01,6584
2002-12-01,6923
2003-01-01,6600
2003-02-01,6742
2003-03-01,6831
2003-04-01,6782
2003-05-01,6714
2003-06-01,6736
2003-07-01,7146
2003-08-01,7027
2003-09-01,6896
2003-10-01,7107
2003-11-01,6997
2003-12-01,7075
2004-01-01,7235
2004-02-01,7072
2004-03-01,6968
2004-04-01,7144
2004-05-01,7232
2004-06-01,7095
2004-07-01,7181
2004-08-01,7146
2004-09-01,7230
2004-10-01,7327
2004-11-01,7328
2004-12-01,7425
2005-01-01,7520
2005-02-01,7551
2005-03-01,7572
2005-04-01,7701
2005-05-01,7819
2005-06-01,7770
2005-07-01,7627
2005-08-01,7816
2005-09-01,7718
2005-10-01,7772
2005-11-01,7788
2005-12-01,7576
2006-01-01,7940
2006-02-01,8027
2006-03-01,7884
2006-04-01,8043
2006-05-01,7995
2006-06-01,8218
2006-07-01,8159
2006-08-01,8331
2006-09-01,8320
2006-10-01,8397
2006-11-01,8603
2006-12-01,8515
2007-01-01,8336
2007-02-01,8233
2007-03-01,8475
2007-04-01,8310
2007-05-01,8583
2007-06-01,8645
2007-07-01,8713
2007-08-01,8636
2007-09-01,8791
2007-10-01,8984
2007-11-01,8867
2007-12-01,9059
2008-01-01,8911
2008-02-01,8701
2008-03-01,8956
2008-04-01,9095
2008-05-01,9102
2008-06-01,9170
2008-07-01,9194
2008-08-01,9164
2008-09-01,9337
2008-10-01,9186
2008-11-01,9029
2008-12-01,9025
2009-01-01,9486
2009-02-01,9219
2009-03-01,9059
2009-04-01,9171
2009-05-01,9114
2009-06-01,8926
2009-07-01,9150
2009-08-01,9105
2009-09-01,9011
2009-10-01,8743
2009-11-01,8958
2009-12-01,8969
2010-01-01,8984
2010-02-01,9068
2010-03-01,9335
2010-04-01,9481
2010-05-01,9132
2010-06-01,9192
2010-07-01,9123
2010-08-01,9091
2010-09-01,9155
2010-10-01,9556
2010-11-01,9477
2010-12-01,9436
2011-01-01,9519
2011-02-01,9667
2011-03-01,9668
2011-04-01,9628
2011-05-01,9376
2011-06-01,9830
2011-07-01,9626
2011-08-01,9802
2011-09-01,9858
2011-10-01,9838
2011-11-01,9846
2011-12-01,9789
2012-01-01,9955
2012-02-01,9909
2012-03-01,9897
2012-04-01,9909
2012-05-01,10127
2012-06-01,10175
2012-07-01,10129
2012-08-01,10251
2012-09-01,10227
2012-10-01,10174
2012-11-01,10402
2012-12-01,10664
2013-01-01,10585
2013-02-01,10661
2013-03-01,10649
2013-04-01,10676
2013-05-01,10863
2013-06-01,10690
2013-07-01,11007
2013-08-01,10835
2013-09-01,10900
2013-10-01,10749
2013-11-01,10946
2013-12-01,10864
2014-01-01,10726
2014-02-01,10821
2014-03-01,10789
2014-04-01,10892
2014-05-01,10892
2014-06-01,10789
2014-07-01,10662
2014-08-01,10767
2014-09-01,10779
2014-10-01,10922
2014-11-01,10662
2014-12-01,10808
2015-01-01,10865
2015-02-01,10740
2015-03-01,10917
2015-04-01,10933
2015-05-01,11074
2015-06-01,11108
2015-07-01,11493
2015-08-01,11386
2015-09-01,11502
2015-10-01,11487
2015-11-01,11375
2015-12-01,11445
2016-01-01,11787
2016-02-01,11792
2016-03-01,11649
2016-04-01,11810
2016-05-01,11496
2016-06-01,11600
2016-07-01,11503
2016-08-01,11715
2016-09-01,11732
2016-10-01,11885
2016-11-01,12092
2016-12-01,11857
2017-01-01,11881
2017-02-01,12355
2017-03-01,12027
2017-04-01,12183
2017-05-01,12170
2017-06-01,12387
2017-07-01,12041
2017-08-01,12139
2017-09-01,11861
2017-10-01,12202
2017-11-01,12178
2017-12-01,12126
2018-01-01,11942
2018-02-01,12206
2018-03-01,12362
2018-04-01,12287
2018-05-01,12497
2018-06-01,12621
2018-07-01,12729
2018-08-01,12689
2018-09-01,12874
2018-10-01,12776
2018-11-01,12995
2018-12-01,13291
2019-01-01,13364
2019-02-01,13135
2019-03-01,13123
2019-04-01,13110
2019-05-01,13152
2019-06-01,13201
2019-07-01,13354
2019-08-01,13427
2019-09-01,13472
2019-10-01,13436
2019-11-01,13430
2019-12-01,13588
2020-01-01,13533
2020-02-01,13575
2020-03-01,13867
2020-04-01,12319
2020-05-01,13909
2020-06-01,13982
2020-07-01,15384
2020-08-01,15701
2020-09-01,15006
2020-10-01,15741
2020-11-01,14934
2020-12-01,13061
2021-01-01,15743

Binary file not shown.


View File

@@ -0,0 +1,70 @@
"""
This is the script that is executed on the remote compute target. It relies
on the model.pkl file, which is uploaded to the compute target along with
this script.
"""
import argparse
from azureml.core import Dataset, Run
from azureml.automl.core.shared.constants import TimeSeriesInternal
from sklearn.externals import joblib
parser = argparse.ArgumentParser()
parser.add_argument(
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)
args = parser.parse_args()
target_column_name = args.target_column_name
test_dataset_id = args.test_dataset
run = Run.get_context()
ws = run.experiment.workspace
# get the input dataset by id
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
X_test = (
test_dataset.drop_columns(columns=[target_column_name])
.to_pandas_dataframe()
.reset_index(drop=True)
)
y_test_df = (
test_dataset.with_timestamp_columns(None)
.keep_columns(columns=[target_column_name])
.to_pandas_dataframe()
)
# generate forecast
fitted_model = joblib.load("model.pkl")
# Default quantiles: the 2.5th and 97.5th percentiles bound a 95% prediction interval
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test_df[target_column_name]
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan; this happens because of
# missing actuals, or at the edges of the series due to lags/rolling windows
clean = X_test[
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
clean.rename(columns={target_column_name: "actual"}, inplace=True)
file_name = "outputs/predictions.csv"
clean.to_csv(file_name, header=True, index=False)
# Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name)

View File

@@ -0,0 +1,263 @@
"""
Helper functions to determine AutoML experiment settings for forecasting.
"""
import pandas as pd
import statsmodels.tsa.stattools as stattools
from arch import unitroot
from azureml.automl.core.shared import constants
from azureml.automl.runtime.shared.score import scoring
def adf_test(series, **kw):
"""
Wrapper for the augmented Dickey-Fuller test. Allows users to set the lag order.
:param series: series to test
:return: dictionary of results
"""
if "lags" in kw.keys():
msg = "Lag order of {} detected. Running the ADF test...".format(
str(kw["lags"])
)
print(msg)
statistic, pval, critval, resstore = stattools.adfuller(
series, maxlag=kw["lags"], autolag=kw["autolag"], store=kw["store"]
)
else:
statistic, pval, critval, resstore = stattools.adfuller(
series, autolag=kw["IC"], store=kw["store"]
)
output = {
"statistic": statistic,
"pval": pval,
"critical": critval,
"resstore": resstore,
}
return output
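# A minimal usage sketch (illustrative, not part of this module): run the ADF
# test with automatic lag selection on a pandas Series `series`.
#
#     adf_res = adf_test(series, IC="AIC", store=True)
#     print(adf_res["pval"], adf_res["critical"])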
def kpss_test(series, **kw):
"""
Wrapper for the KPSS test. Allows users to set the lag order.
:param series: series to test
:return: dictionary of results
"""
if kw["store"]:
statistic, p_value, critical_values, rstore = stattools.kpss(
series, regression=kw["reg_type"], lags=kw["lags"], store=kw["store"]
)
else:
statistic, p_value, lags, critical_values = stattools.kpss(
series, regression=kw["reg_type"], lags=kw["lags"]
)
output = {
"statistic": statistic,
"pval": p_value,
"critical": critical_values,
"lags": rstore.lags if kw["store"] else lags,
}
if kw["store"]:
output.update({"resstore": rstore})
return output
def format_test_output(test_name, test_res, H0_unit_root=True):
"""
Helper function to format output. Return a dictionary with specific keys. Will be used to
construct the summary data frame for all unit root tests.
    TODO: Add support for choosing lags based on the max lag order specified by the user.
:param test_name: name of the test
:param test_res: object that contains corresponding test information. Can be None if test failed.
:param H0_unit_root: does the null hypothesis of the test assume a unit root process? Some tests do (ADF),
some don't (KPSS).
:return: dictionary of summary table for all tests and final decision on stationary vs non-stationary.
If test failed (test_res is None), return empty dictionary.
"""
# Check if the test failed by trying to extract the test statistic
if test_name in ("ADF", "KPSS"):
try:
test_res["statistic"]
except BaseException:
test_res = None
else:
try:
test_res.stat
except BaseException:
test_res = None
if test_res is None:
return {}
# extract necessary information
if test_name in ("ADF", "KPSS"):
statistic = test_res["statistic"]
crit_val = test_res["critical"]["5%"]
p_val = test_res["pval"]
lags = test_res["resstore"].usedlag if test_name == "ADF" else test_res["lags"]
else:
statistic = test_res.stat
crit_val = test_res.critical_values["5%"]
p_val = test_res.pvalue
lags = test_res.lags
if H0_unit_root:
H0 = "The process is non-stationary"
stationary = "yes" if p_val < 0.05 else "not"
else:
H0 = "The process is stationary"
stationary = "yes" if p_val > 0.05 else "not"
out = {
"test_name": test_name,
"statistic": statistic,
"crit_val": crit_val,
"p_val": p_val,
"lags": int(lags),
"stationary": stationary,
"Null Hypothesis": H0,
}
return out
def unit_root_test_wrapper(series, lags=None):
"""
    Main function to run multiple stationarity tests. Runs five tests and returns a summary table plus a
    decision based on the majority rule. If the number of tests that deem a series stationary equals the
    number of tests that deem it non-stationary, we assume the series is non-stationary.
* Augmented Dickey-Fuller (ADF),
* KPSS,
* ADF using GLS,
* Phillips-Perron (PP),
* Zivot-Andrews (ZA)
:param lags: (optional) parameter that allows user to run a series of tests for a specific lag value.
:param series: series to test
    :return: dictionary of summary table for all tests and final decision on stationary vs non-stationary
"""
# setting for ADF and KPSS tests
adf_settings = {"IC": "AIC", "store": True}
kpss_settings = {"reg_type": "c", "lags": "auto", "store": True}
arch_test_settings = {} # settings for PP, ADF GLS and ZA tests
if lags is not None:
adf_settings.update({"lags": lags, "autolag": None})
        kpss_settings.update({"lags": lags})
arch_test_settings = {"lags": lags}
# Run individual tests
adf = adf_test(series, **adf_settings) # ADF test
kpss = kpss_test(series, **kpss_settings) # KPSS test
pp = unitroot.PhillipsPerron(series, **arch_test_settings) # Phillips-Perron test
adfgls = unitroot.DFGLS(series, **arch_test_settings) # ADF using GLS test
za = unitroot.ZivotAndrews(series, **arch_test_settings) # Zivot-Andrews test
# generate output table
adf_dict = format_test_output(test_name="ADF", test_res=adf, H0_unit_root=True)
kpss_dict = format_test_output(test_name="KPSS", test_res=kpss, H0_unit_root=False)
pp_dict = format_test_output(
test_name="Philips Perron", test_res=pp, H0_unit_root=True
)
adfgls_dict = format_test_output(
test_name="ADF GLS", test_res=adfgls, H0_unit_root=True
)
za_dict = format_test_output(
test_name="Zivot-Andrews", test_res=za, H0_unit_root=True
)
test_dict = {
"ADF": adf_dict,
"KPSS": kpss_dict,
"PP": pp_dict,
"ADF GLS": adfgls_dict,
"ZA": za_dict,
}
test_sum = pd.DataFrame.from_dict(test_dict, orient="index").reset_index(drop=True)
# decision based on the majority rule
if test_sum.shape[0] > 0:
ratio = test_sum[test_sum["stationary"] == "yes"].shape[0] / test_sum.shape[0]
else:
ratio = 1 # all tests fail, assume the series is stationary
    # Majority rule. If the ratio is exactly 0.5, assume the series is non-stationary.
stationary = "YES" if (ratio > 0.5) else "NO"
out = {"summary": test_sum, "stationary": stationary}
return out
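# A minimal usage sketch (illustrative): apply the majority-rule wrapper to the
# series loaded from the CSV above; `df` and the column name are assumptions
# made by this example, not part of the module.
#
#     test_results = unit_root_test_wrapper(df["S4248SM144SCEN"])
#     print(test_results["summary"])
#     print("Stationary:", test_results["stationary"])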
def ts_train_test_split(df_input, n, time_colname, ts_id_colnames=None):
"""
Group data frame by time series ID and split on last n rows for each group.
:param df_input: input data frame
:param n: number of observations in the test set
:param time_colname: time column
:param ts_id_colnames: (optional) list of grain column names
:return train and test data frames
"""
if ts_id_colnames is None:
ts_id_colnames = []
ts_id_colnames_original = ts_id_colnames.copy()
if len(ts_id_colnames) == 0:
ts_id_colnames = ["Grain"]
df_input[ts_id_colnames[0]] = "dummy"
# Sort by ascending time
df_grouped = df_input.sort_values(time_colname).groupby(
ts_id_colnames, group_keys=False
)
df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])
df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])
# drop group column name if it was not originally provided
if len(ts_id_colnames_original) == 0:
df_head.drop(ts_id_colnames, axis=1, inplace=True)
df_tail.drop(ts_id_colnames, axis=1, inplace=True)
return df_head, df_tail
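# A minimal usage sketch (illustrative): hold out the last 12 monthly observations
# as a test set, assuming a frame with a "date" time column and no grain columns.
#
#     df_train, df_test = ts_train_test_split(df, n=12, time_colname="date")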
def compute_metrics(fcst_df, metric_name=None, ts_id_colnames=None):
"""
Calculate metrics per grain.
:param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'
:param metric_name: (optional) name of the metric to return
:param ts_id_colnames: (optional) list of grain column names
    :return: data frame of metrics per grain (one row per metric per grain)
"""
if ts_id_colnames is None:
ts_id_colnames = []
if len(ts_id_colnames) == 0:
ts_id_colnames = ["TS_ID"]
fcst_df[ts_id_colnames[0]] = "dummy"
metrics_list = []
for grain, df in fcst_df.groupby(ts_id_colnames):
try:
scores = scoring.score_regression(
y_test=df["actual_level"],
y_pred=df["predicted_level"],
metrics=list(constants.Metric.SCALAR_REGRESSION_SET),
)
except BaseException:
msg = "{}: metrics calculation failed.".format(grain)
print(msg)
scores = {}
one_grain_metrics_df = pd.DataFrame(
list(scores.items()), columns=["metric_name", "metric"]
).sort_values(["metric_name"])
one_grain_metrics_df.reset_index(inplace=True, drop=True)
if len(ts_id_colnames) < 2:
one_grain_metrics_df["grain"] = ts_id_colnames[0]
else:
one_grain_metrics_df["grain"] = "|".join(list(grain))
metrics_list.append(one_grain_metrics_df)
# collect into a data frame
grain_metrics = pd.concat(metrics_list)
if metric_name is not None:
grain_metrics = grain_metrics.query("metric_name == @metric_name")
return grain_metrics
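# A minimal usage sketch (illustrative): score a forecast frame that has
# "actual_level" and "predicted_level" columns, then pull out a single metric
# (the metric name shown is one of the AutoML scalar regression metrics).
#
#     all_metrics = compute_metrics(fcst_df)
#     rmse = compute_metrics(fcst_df, metric_name="root_mean_squared_error")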

View File

@@ -0,0 +1,49 @@
import os
import shutil
from azureml.core import ScriptRunConfig
def run_remote_inference(
test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
    # Create a local directory to copy the model.pkl and forecasting_script.py files into.
    # These files will be uploaded to and executed on the compute target.
os.makedirs(inference_folder, exist_ok=True)
shutil.copy("forecasting_script.py", inference_folder)
train_run.download_file(
"outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
)
inference_env = train_run.get_environment()
config = ScriptRunConfig(
source_directory=inference_folder,
script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name,
"--test_dataset",
test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target,
environment=inference_env,
)
run = test_experiment.submit(
config,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags["run_algorithm"])
return run
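# A minimal usage sketch (illustrative); `test_experiment`, `compute_target`,
# `remote_run` (a completed AutoML training run), and `test_dataset` are
# assumptions of this example and must already exist in the calling notebook.
#
#     inference_run = run_remote_inference(
#         test_experiment=test_experiment,
#         compute_target=compute_target,
#         train_run=remote_run.get_best_child(),
#         test_dataset=test_dataset,
#         target_column_name="S4248SM144SCEN",
#     )
#     inference_run.wait_for_completion(show_output=False)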

View File

@@ -0,0 +1,18 @@
---
page_type: sample
languages:
- python
products:
- azure-machine-learning
description: Notebook showing how to use Azure Machine Learning pipelines to do Batch Predictions with an Image Classification model trained using AutoML.
---
# Batch Scoring with an Image Classification Model
- Dataset: Toy dataset with images of products found in a fridge
- **[Jupyter Notebook](auto-ml-image-classification-multiclass-batch-scoring.ipynb)**
- register an Image Classification Multi-Class model already trained using AutoML
- create an Inference Dataset
- provision compute targets and create a Batch Scoring script
- use ParallelRunStep to do batch scoring
- build, run, and publish a pipeline
- enable a REST endpoint for the pipeline

View File

@@ -0,0 +1,950 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License.\n",
"\n",
"# Batch Predictions for an Image Classification model trained using AutoML\n",
"In this notebook, we go over how you can use [Azure Machine Learning pipelines](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-pipeline-batch-scoring-classification) to run a batch scoring image classification job.\n",
"\n",
"**Please note:** For this notebook you can use an existing image classification model trained using AutoML for Images or use the simple model training we included below for convenience. For detailed instructions on how to train an image classification model with AutoML, please refer to the official [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models) and to the [image classification multiclass notebook](https://github.com/Azure/azureml-examples/blob/main/python-sdk/tutorials/automl-with-azureml/image-classification-multiclass/auto-ml-image-classification-multiclass.ipynb)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Important:** This feature is currently in public preview. This preview version is provided without a service-level agreement. Certain features might not be supported or might have constrained capabilities. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/en-us/support/legal/preview-supplemental-terms/)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Environment Setup\n",
"Please follow the [\"Setup a new conda environment\"](https://github.com/Azure/azureml-examples/tree/main/python-sdk/tutorials/automl-with-azureml#3-setup-a-new-conda-environment) instructions to get started."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK.\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK.\")\n",
"assert (\n",
" azureml.core.VERSION >= \"1.35\"\n",
"), \"Please upgrade the Azure ML SDK by running '!pip install --upgrade azureml-sdk' then restart the kernel.\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## You will perform the following tasks:\n",
"\n",
"* Register a Model already trained using AutoML for Image Classification.\n",
"* Create an Inference Dataset.\n",
"* Provision compute targets and create a Batch Scoring script.\n",
"* Use ParallelRunStep to do batch scoring.\n",
"* Build, run, and publish a pipeline.\n",
"* Enable a REST endpoint for the pipeline."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Workspace setup\n",
"\n",
"An [Azure ML Workspace](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#workspace) is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models.\n",
"\n",
"Create an Azure ML Workspace within your Azure subscription or load an existing workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"\n",
"ws = Workspace.from_config()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Workspace default datastore is used to store inference input images and outputs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def_data_store = ws.get_default_datastore()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Compute target setup\n",
"You will need to provide a [Compute Target](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#computes) that will be used for your AutoML model training. AutoML models for image tasks require [GPU SKUs](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-gpu) such as the ones from the NC, NCv2, NCv3, ND, NDv2 and NCasT4 series. We recommend using the NCsv3-series (with v100 GPUs) for faster training. Using a compute target with a multi-GPU VM SKU will leverage the multiple GPUs to speed up training. Additionally, setting up a compute target with multiple nodes will allow for faster model training by leveraging parallelism, when tuning hyperparameters for your model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"\n",
"cluster_name = \"gpu-cluster-nc6\"\n",
"\n",
"try:\n",
" compute_target = ws.compute_targets[cluster_name]\n",
" print(\"Found existing compute target.\")\n",
"except KeyError:\n",
" print(\"Creating a new compute target...\")\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"Standard_NC6\",\n",
" idle_seconds_before_scaledown=600,\n",
" min_nodes=0,\n",
" max_nodes=4,\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
"# Can poll for a minimum number of nodes and for a specific timeout.\n",
"# If no min_node_count is provided, it will use the scale settings for the cluster.\n",
"compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train an Image Classification model\n",
"\n",
"In this section we will do a quick model train to use for the batch scoring. For a datailed example on how to train an image classification model, please refer to the official [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models) or to the [image classification multiclass notebook](https://github.com/Azure/azureml-examples/blob/main/python-sdk/tutorials/automl-with-azureml/image-classification-multiclass/auto-ml-image-classification-multiclass.ipynb). If you already have a model trained in the same workspace, you can skip to section [\"Create data objects\"](#Create-data-objects)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Experiment Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment_name = \"automl-image-batchscoring\"\n",
"experiment = Experiment(ws, name=experiment_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Download dataset with input Training Data\n",
"\n",
"All images in this notebook are hosted in [this repository](https://github.com/microsoft/computervision-recipes) and are made available under the [MIT license](https://github.com/microsoft/computervision-recipes/blob/master/LICENSE)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import urllib\n",
"from zipfile import ZipFile\n",
"\n",
"# download data\n",
"download_url = \"https://cvbp-secondary.z19.web.core.windows.net/datasets/image_classification/fridgeObjects.zip\"\n",
"data_file = \"./fridgeObjects.zip\"\n",
"urllib.request.urlretrieve(download_url, filename=data_file)\n",
"\n",
"# extract files\n",
"with ZipFile(data_file, \"r\") as zip:\n",
" print(\"extracting files...\")\n",
" zip.extractall()\n",
" print(\"done\")\n",
"# delete zip file\n",
"os.remove(data_file)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Convert the downloaded data to JSONL"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"\n",
"src = \"./fridgeObjects/\"\n",
"train_validation_ratio = 5\n",
"\n",
"# Retrieving default datastore that got automatically created when we setup a workspace\n",
"workspaceblobstore = ws.get_default_datastore().name\n",
"\n",
"# Path to the training and validation files\n",
"train_annotations_file = os.path.join(src, \"train_annotations.jsonl\")\n",
"validation_annotations_file = os.path.join(src, \"validation_annotations.jsonl\")\n",
"\n",
"# sample json line dictionary\n",
"json_line_sample = {\n",
" \"image_url\": \"AmlDatastore://\"\n",
" + workspaceblobstore\n",
" + \"/\"\n",
" + os.path.basename(os.path.dirname(src)),\n",
" \"label\": \"\",\n",
"}\n",
"\n",
"index = 0\n",
"# Scan each sub directary and generate jsonl line\n",
"with open(train_annotations_file, \"w\") as train_f:\n",
" with open(validation_annotations_file, \"w\") as validation_f:\n",
" for className in os.listdir(src):\n",
" subDir = src + className\n",
" if not os.path.isdir(subDir):\n",
" continue\n",
" # Scan each sub directary\n",
" print(\"Parsing \" + subDir)\n",
" for image in os.listdir(subDir):\n",
" json_line = dict(json_line_sample)\n",
" json_line[\"image_url\"] += f\"/{className}/{image}\"\n",
" json_line[\"label\"] = className\n",
"\n",
" if index % train_validation_ratio == 0:\n",
" # validation annotation\n",
" validation_f.write(json.dumps(json_line) + \"\\n\")\n",
" else:\n",
" # train annotation\n",
" train_f.write(json.dumps(json_line) + \"\\n\")\n",
" index += 1"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Upload the JSONL file and images to Datastore"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Retrieving default datastore that got automatically created when we setup a workspace\n",
"ds = ws.get_default_datastore()\n",
"ds.upload(src_dir=\"./fridgeObjects\", target_path=\"fridgeObjects\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Create and register datasets in workspace"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"from azureml.data import DataType\n",
"\n",
"# get existing training dataset\n",
"training_dataset_name = \"fridgeObjectsTrainingDataset\"\n",
"if training_dataset_name in ws.datasets:\n",
" training_dataset = ws.datasets.get(training_dataset_name)\n",
" print(\"Found the training dataset\", training_dataset_name)\n",
"else:\n",
" # create training dataset\n",
" training_dataset = Dataset.Tabular.from_json_lines_files(\n",
" path=ds.path(\"fridgeObjects/train_annotations.jsonl\"),\n",
" set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n",
" )\n",
" training_dataset = training_dataset.register(\n",
" workspace=ws, name=training_dataset_name\n",
" )\n",
"# get existing validation dataset\n",
"validation_dataset_name = \"fridgeObjectsValidationDataset\"\n",
"if validation_dataset_name in ws.datasets:\n",
" validation_dataset = ws.datasets.get(validation_dataset_name)\n",
" print(\"Found the validation dataset\", validation_dataset_name)\n",
"else:\n",
" # create validation dataset\n",
" validation_dataset = Dataset.Tabular.from_json_lines_files(\n",
" path=ds.path(\"fridgeObjects/validation_annotations.jsonl\"),\n",
" set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n",
" )\n",
" validation_dataset = validation_dataset.register(\n",
" workspace=ws, name=validation_dataset_name\n",
" )\n",
"print(\"Training dataset name: \" + training_dataset.name)\n",
"print(\"Validation dataset name: \" + validation_dataset.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Submit training 1 training run with default hyperparameters"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared.constants import ImageTask\n",
"from azureml.train.automl import AutoMLImageConfig\n",
"from azureml.train.hyperdrive import GridParameterSampling, choice\n",
"\n",
"image_config_vit = AutoMLImageConfig(\n",
" task=ImageTask.IMAGE_CLASSIFICATION,\n",
" compute_target=compute_target,\n",
" training_data=training_dataset,\n",
" validation_data=validation_dataset,\n",
" hyperparameter_sampling=GridParameterSampling({\"model_name\": choice(\"vitb16r224\")}),\n",
" iterations=1,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run = experiment.submit(image_config_vit)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run.wait_for_completion(wait_post_processing=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create data objects\n",
"\n",
"When building pipelines, `Dataset` objects are used for reading data from workspace datastores, and `PipelineData` objects are used for transferring intermediate data between pipeline steps.\n",
"\n",
"This batch scoring example only uses one pipeline step, but in use-cases with multiple steps, the typical flow will include:\n",
"\n",
"1. Using `Dataset` objects as inputs to fetch raw data, performing some transformations, then output a `PipelineData` object. \n",
"1. Use the previous step's `PipelineData` **output object** as an **input object**, repeated for subsequent steps.\n",
"\n",
"For this scenario you create `Dataset` objects corresponding to the datastore directories for the input images. You also create a `PipelineData` object for the batch scoring output data. An object reference in the `outputs` array becomes available as an **input** for a subsequent pipeline step, for scenarios where there is more than one step. In this case we are just going to build a single step pipeline.\n",
"\n",
"It is assumed that an image classification training run was already performed in this workspace and the files are already in the datastore. If this is not the case, please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models) to know how to train an image classification model with AutoML.\n",
"\n",
"All images in this notebook are hosted in [this repository](https://github.com/microsoft/computervision-recipes) and are made available under the [MIT license](https://github.com/microsoft/computervision-recipes/blob/master/LICENSE)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.dataset import Dataset\n",
"from azureml.pipeline.core import PipelineData\n",
"\n",
"input_images = Dataset.File.from_files((def_data_store, \"fridgeObjects/**/*.jpg\"))\n",
"\n",
"output_dir = PipelineData(name=\"scores\", datastore=def_data_store)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Next, we need to register the input datasets for batch scoring with the workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"input_images = input_images.register(\n",
" workspace=ws, name=\"fridgeObjects_scoring_images\", create_new_version=True\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieve the environment and metrics from the training run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.experiment import Experiment\n",
"from azureml.core import Run\n",
"\n",
"experiment_name = \"automl-image-batchscoring\"\n",
"# If your model was not trained with this notebook, replace the id below\n",
"# with the run id of the child training run (i.e., the one ending with HD_0)\n",
"training_run_id = automl_image_run.id + \"_HD_0\"\n",
"exp = Experiment(ws, experiment_name)\n",
"training_run = Run(exp, training_run_id)\n",
"\n",
"# The below will give only the requested metric\n",
"metrics = training_run.get_metrics(\"accuracy\")\n",
"best_metric = max(metrics[\"accuracy\"])\n",
"print(\"best_metric:\", best_metric)\n",
"\n",
"# Retrieve the training environment\n",
"env = training_run.get_environment()\n",
"print(env)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Register model with metric and environment tags\n",
"\n",
"Now you register the model to your workspace, which allows you to easily retrieve it in the pipeline process. In the `register()` static function, the `model_name` parameter is the key you use to locate your model throughout the SDK.\n",
"Tag the model with the metrics and the environment used to train the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.model import Model\n",
"\n",
"tags = dict()\n",
"tags[\"accuracy\"] = best_metric\n",
"tags[\"env_name\"] = env.name\n",
"tags[\"env_version\"] = env.version\n",
"\n",
"model_name = \"fridgeObjectsClassifier\"\n",
"model = training_run.register_model(\n",
" model_name=model_name, model_path=\"train_artifacts\", tags=tags\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# List the models from the workspace\n",
"models = Model.list(ws, name=model_name, latest=True)\n",
"print(model.name)\n",
"print(model.tags)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Write a scoring script"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To do the scoring, you create a batch scoring script `batch_scoring.py`, and write it to the scripts folder in current directory. The script takes a minibatch of input images, applies the classification model, and outputs the predictions to a results file.\n",
"\n",
"The script `batch_scoring.py` takes the following parameters, which get passed from the `ParallelRunStep` that you create later:\n",
"\n",
"- `--model_name`: the name of the model being used\n",
"\n",
"While creating the batch scoring script, refer to the scoring scripts generated under the outputs folder of the Automl training runs. This will help to identify the right model settings to be used in the batch scoring script init method while loading the model.\n",
"Note: The batch scoring script we generate in the subsequent step is different from the scoring script generated by the training runs in the below screenshot. We refer to it just to identify the right model settings to be used in the batch scoring script.\n",
"\n",
"![Training run outputs](ui_outputs.PNG \"Training run outputs\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# View the batch scoring script. Use the model settings as appropriate for your model.\n",
"with open(\"./scripts/batch_scoring.py\", \"r\") as f:\n",
" print(f.read())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Build and run the pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the parallel-run configuration to wrap the inference script\n",
"Create the pipeline run configuration specifying the script, environment configuration, and parameters. Specify the compute target you already attached to your workspace as the target of execution of the script. This will set the run configuration of the ParallelRunStep we will define next.\n",
"\n",
"Refer this [site](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/machine-learning-pipelines/parallel-run) for more details on ParallelRunStep of Azure Machine Learning Pipelines."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.steps import ParallelRunConfig\n",
"\n",
"parallel_run_config = ParallelRunConfig(\n",
" environment=env,\n",
" entry_script=\"batch_scoring.py\",\n",
" source_directory=\"scripts\",\n",
" output_action=\"append_row\",\n",
" append_row_file_name=\"parallel_run_step.txt\",\n",
" mini_batch_size=\"20\", # Num files to process in one call\n",
" error_threshold=1,\n",
" compute_target=compute_target,\n",
" process_count_per_node=2,\n",
" node_count=1,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the pipeline step\n",
"\n",
"A pipeline step is an object that encapsulates everything you need for running a pipeline including:\n",
"\n",
"* environment and dependency settings\n",
"* the compute resource to run the pipeline on\n",
"* input and output data, and any custom parameters\n",
"* reference to a script to run during the step\n",
"\n",
"There are multiple classes that inherit from the parent class [`PipelineStep`](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/?view=azure-ml-py) to assist with building a step using certain frameworks and stacks. In this example, you use the [`ParallelRunStep`](https://docs.microsoft.com/en-us/python/api/azureml-contrib-pipeline-steps/azureml.contrib.pipeline.steps.parallelrunstep?view=azure-ml-py) class to define your step logic using a scoring script. `ParallelRunStep` executes the script in a distributed fashion.\n",
"\n",
"The pipelines infrastructure uses the `ArgumentParser` class to pass parameters into pipeline steps. For example, in the code below the first argument `--model_name` is given the property identifier `model_name`. In the `main()` function, this property is accessed using `Model.get_model_path(args.model_name)`.\n",
"\n",
"Note: The pipeline in this tutorial only has one step and writes the output to a file, but for multi-step pipelines, you also use `ArgumentParser` to define a directory to write output data for input to subsequent steps. See the [notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb) for an example of passing data between multiple pipeline steps using the `ArgumentParser` design pattern."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.steps import ParallelRunStep\n",
"from datetime import datetime\n",
"\n",
"parallel_step_name = \"batchscoring-\" + datetime.now().strftime(\"%Y%m%d%H%M\")\n",
"\n",
"arguments = [\"--model_name\", model_name]\n",
"\n",
"# Specify inference batch_size, otherwise uses default value. (This is different from the mini_batch_size above)\n",
"# NOTE: Large batch sizes may result in OOM errors.\n",
"# arguments = arguments + [\"--batch_size\", \"20\"]\n",
"\n",
"batch_score_step = ParallelRunStep(\n",
" name=parallel_step_name,\n",
" inputs=[input_images.as_named_input(\"input_images\")],\n",
" output=output_dir,\n",
" arguments=arguments,\n",
" parallel_run_config=parallel_run_config,\n",
" allow_reuse=False,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For a list of all classes for different step types, see the [steps package](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps?view=azure-ml-py)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Run the pipeline\n",
"\n",
"Now you run the pipeline. First create a `Pipeline` object with your workspace reference and the pipeline step you created. The `steps` parameter is an array of steps, and in this case, there is only one step for batch scoring. To build pipelines with multiple steps, you place the steps in order in this array.\n",
"\n",
"Next use the `Experiment.submit()` function to submit the pipeline for execution. You also specify the custom parameter `param_batch_size`. The `wait_for_completion` function will output logs during the pipeline build process, which allows you to see current progress.\n",
"\n",
"Note: The first pipeline run takes roughly **15 minutes**, as all dependencies must be downloaded, a Docker image is created, and the Python environment is provisioned/created. Running it again takes significantly less time as those resources are reused. However, total run time depends on the workload of your scripts and processes running in each pipeline step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"from azureml.pipeline.core import Pipeline\n",
"\n",
"pipeline = Pipeline(workspace=ws, steps=[batch_score_step])\n",
"pipeline_run = Experiment(ws, \"batch_scoring_automl_image\").submit(pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# This will output information of the pipeline run, including the link to the details page of portal.\n",
"pipeline_run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Wait the run for completion and show output log to console\n",
"pipeline_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Download and review output"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tempfile\n",
"import os\n",
"\n",
"batch_run = pipeline_run.find_step_run(batch_score_step.name)[0]\n",
"batch_output = batch_run.get_output_data(output_dir.name)\n",
"\n",
"target_dir = tempfile.mkdtemp()\n",
"batch_output.download(local_path=target_dir)\n",
"result_file = os.path.join(\n",
" target_dir, batch_output.path_on_datastore, parallel_run_config.append_row_file_name\n",
")\n",
"result_file\n",
"\n",
"# Print the first five lines of the output\n",
"with open(result_file) as f:\n",
" for x in range(5):\n",
" print(next(f))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Choose a random file for visualization"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import random\n",
"import json\n",
"\n",
"with open(result_file, \"r\") as f:\n",
" contents = f.readlines()\n",
"rand_file = contents[random.randrange(len(contents))]\n",
"prediction = json.loads(rand_file)\n",
"print(prediction[\"filename\"])\n",
"print(prediction[\"probs\"])\n",
"print(prediction[\"labels\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Download the image file from the datastore\n",
"path = (\n",
" \"fridgeObjects\"\n",
" + \"/\"\n",
" + prediction[\"filename\"].split(\"/\")[-2]\n",
" + \"/\"\n",
" + prediction[\"filename\"].split(\"/\")[-1]\n",
")\n",
"path_on_datastore = def_data_store.path(path)\n",
"single_image_ds = Dataset.File.from_files(path=path_on_datastore, validate=False)\n",
"image = single_image_ds.download()[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.image as mpimg\n",
"from PIL import Image\n",
"import numpy as np\n",
"import json\n",
"\n",
"IMAGE_SIZE = (18, 12)\n",
"plt.figure(figsize=IMAGE_SIZE)\n",
"img_np = mpimg.imread(image)\n",
"img = Image.fromarray(img_np.astype(\"uint8\"), \"RGB\")\n",
"x, y = img.size\n",
"\n",
"fig, ax = plt.subplots(1, figsize=(15, 15))\n",
"# Display the image\n",
"ax.imshow(img_np)\n",
"\n",
"label_index = np.argmax(prediction[\"probs\"])\n",
"label = prediction[\"labels\"][label_index]\n",
"conf_score = prediction[\"probs\"][label_index]\n",
"\n",
"display_text = \"{} ({})\".format(label, round(conf_score, 3))\n",
"print(display_text)\n",
"\n",
"color = \"red\"\n",
"plt.text(30, 30, display_text, color=color, fontsize=30)\n",
"\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Publish and run from REST endpoint"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Run the following code to publish the pipeline to your workspace. In your workspace in the portal, you can see metadata for the pipeline including run history and durations. You can also run the pipeline manually from the portal.\n",
"\n",
"Additionally, publishing the pipeline enables a REST endpoint to rerun the pipeline from any HTTP library on any platform."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"published_pipeline = pipeline_run.publish_pipeline(\n",
" name=\"automl-image-batch-scoring\",\n",
" description=\"Batch scoring using Automl for Image\",\n",
" version=\"1.0\",\n",
")\n",
"\n",
"published_pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To run the pipeline from the REST endpoint, you first need an OAuth2 Bearer-type authentication header. This example uses interactive authentication for illustration purposes, but for most production scenarios requiring automated or headless authentication, use service principal authentication as [described in this notebook](https://aka.ms/pl-restep-auth).\n",
"\n",
"Service principal authentication involves creating an **App Registration** in **Azure Active Directory**, generating a client secret, and then granting your service principal **role access** to your machine learning workspace. You then use the [`ServicePrincipalAuthentication`](https://docs.microsoft.com/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py) class to manage your auth flow.\n",
"\n",
"Both `InteractiveLoginAuthentication` and `ServicePrincipalAuthentication` inherit from `AbstractAuthentication`, and in both cases you use the `get_authentication_header()` function in the same way to fetch the header."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.authentication import InteractiveLoginAuthentication\n",
"\n",
"interactive_auth = InteractiveLoginAuthentication()\n",
"auth_header = interactive_auth.get_authentication_header()"
]
},
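{
"cell_type": "markdown",
"metadata": {},
"source": [
"The cell below is a minimal sketch of the service principal flow described above, using the `ServicePrincipalAuthentication` class from `azureml.core.authentication`. The tenant id, client id, and secret are placeholders you must supply from your own App Registration, so the lines are left commented out."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A minimal sketch of service principal authentication; the placeholder values\n",
"# below are assumptions and must be replaced before running.\n",
"from azureml.core.authentication import ServicePrincipalAuthentication\n",
"\n",
"# sp_auth = ServicePrincipalAuthentication(\n",
"#     tenant_id=\"<tenant-id>\",\n",
"#     service_principal_id=\"<client-id>\",\n",
"#     service_principal_password=\"<client-secret>\",\n",
"# )\n",
"# auth_header = sp_auth.get_authentication_header()"
]
},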
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Get the REST url from the `endpoint` property of the published pipeline object. You can also find the REST url in your workspace in the portal. Build an HTTP POST request to the endpoint, specifying your authentication header. Additionally, add a JSON payload object with the experiment name and the batch size parameter. As a reminder, the `process_count_per_node` is passed through to `ParallelRunStep` because you defined it is defined as a `PipelineParameter` object in the step configuration.\n",
"\n",
"Make the request to trigger the run. Access the `Id` key from the response dictionary to get the value of the run id."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"\n",
"rest_endpoint = published_pipeline.endpoint\n",
"response = requests.post(\n",
" rest_endpoint,\n",
" headers=auth_header,\n",
" json={\n",
" \"ExperimentName\": \"batch_scoring\",\n",
" \"ParameterAssignments\": {\"process_count_per_node\": 2},\n",
" },\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"try:\n",
" response.raise_for_status()\n",
"except Exception:\n",
" raise Exception(\n",
" \"Received bad response from the endpoint: {}\\n\"\n",
" \"Response Code: {}\\n\"\n",
" \"Headers: {}\\n\"\n",
" \"Content: {}\".format(\n",
" rest_endpoint, response.status_code, response.headers, response.content\n",
" )\n",
" )\n",
"run_id = response.json().get(\"Id\")\n",
"print(\"Submitted pipeline run: \", run_id)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Use the run id to monitor the status of the new run. This will take another 10-15 min to run and will look similar to the previous pipeline run, so if you don't need to see another pipeline run, you can skip watching the full output."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core.run import PipelineRun\n",
"\n",
"published_pipeline_run = PipelineRun(ws.experiments[\"batch_scoring\"], run_id)\n",
"published_pipeline_run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Wait the run for completion and show output log to console\n",
"published_pipeline_run.wait_for_completion(show_output=True)"
]
}
],
"metadata": {
"authors": [
{
"name": [
"sanpil",
"trmccorm",
"pansav"
]
}
],
"categories": [
"tutorials"
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
},
"metadata": {
"interpreter": {
"hash": "0f25b6eb4724eea488a4edd67dd290abce7d142c09986fc811384b5aebc0585a"
}
},
"msauthor": "trbye"
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,69 @@
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.
import os
import argparse
import json
from azureml.core.model import Model
from azureml.automl.core.shared import logging_utilities

try:
    from azureml.automl.dnn.vision.common.logging_utils import get_logger
    from azureml.automl.dnn.vision.common.model_export_utils import (
        load_model,
        run_inference_batch,
    )
    from azureml.automl.dnn.vision.classification.inference.score import (
        _score_with_model,
    )
    from azureml.automl.dnn.vision.common.utils import _set_logging_parameters
except ImportError:
    from azureml.contrib.automl.dnn.vision.common.logging_utils import get_logger
    from azureml.contrib.automl.dnn.vision.common.model_export_utils import (
        load_model,
        run_inference_batch,
    )
    from azureml.contrib.automl.dnn.vision.classification.inference.score import (
        _score_with_model,
    )
    from azureml.contrib.automl.dnn.vision.common.utils import _set_logging_parameters

TASK_TYPE = "image-classification"
logger = get_logger("azureml.automl.core.scoring_script_images")


def init():
    global model
    global batch_size

    # Set up logging
    _set_logging_parameters(TASK_TYPE, {})

    parser = argparse.ArgumentParser(
        description="Retrieve model_name and batch_size from arguments."
    )
    parser.add_argument("--model_name", dest="model_name", required=True)
    parser.add_argument("--batch_size", dest="batch_size", type=int, required=False)
    args, _ = parser.parse_known_args()

    batch_size = args.batch_size
    model_path = os.path.join(Model.get_model_path(args.model_name), "model.pt")
    print(model_path)
    try:
        logger.info("Loading model from path: {}.".format(model_path))
        model_settings = {}
        model = load_model(TASK_TYPE, model_path, **model_settings)
        logger.info("Loading successful.")
    except Exception as e:
        logging_utilities.log_traceback(e, logger)
        raise
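

# run() is invoked by the batch scoring runtime once per mini-batch of input
# image files and returns one prediction result per image.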
def run(mini_batch):
    logger.info("Running inference.")
    result = run_inference_batch(model, mini_batch, _score_with_model, batch_size)
    logger.info("Finished inferencing.")
    return result

Binary file not shown (added image, 258 KiB).


@@ -0,0 +1,15 @@
---
page_type: sample
languages:
- python
products:
- azure-machine-learning
description: Notebook showing how to use AutoML for training an Image Classification Multi-Class model. We use a small dataset to train the model, demonstrate how to tune the model's hyperparameters to optimize performance, and deploy the model for use in inference scenarios.
---
# Image Classification Multi-Class using AutoML for Images
- Dataset: Toy dataset with images of products found in a fridge
- **[Jupyter Notebook](auto-ml-image-classification-multiclass.ipynb)**
- train an Image Classification Multi-Class model using AutoML
- tune hyperparameters of the model to optimize model performance
- deploy the model to use in inference scenarios


@@ -0,0 +1,744 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License.\n",
"\n",
"# Training an Image Classification Multi-Class model using AutoML\n",
"In this notebook, we go over how you can use AutoML for training an Image Classification Multi-Class model. We will use a small dataset to train the model, demonstrate how you can tune hyperparameters of the model to optimize model performance and deploy the model to use in inference scenarios. For detailed information please refer to the [documentation of AutoML for Images](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![img](example_image_classification_multiclass_predictions.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Important:** This feature is currently in public preview. This preview version is provided without a service-level agreement. Certain features might not be supported or might have constrained capabilities. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/en-us/support/legal/preview-supplemental-terms/)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Environment Setup\n",
"Please follow the [\"Setup a new conda environment\"](https://github.com/Azure/azureml-examples/tree/main/python-sdk/tutorials/automl-with-azureml#3-setup-a-new-conda-environment) instructions to get started."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK.\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK.\")\n",
"assert (\n",
" azureml.core.VERSION >= \"1.35\"\n",
"), \"Please upgrade the Azure ML SDK by running '!pip install --upgrade azureml-sdk' then restart the kernel.\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Workspace setup\n",
"In order to train and deploy models in Azure ML, you will first need to set up a workspace.\n",
"\n",
"An [Azure ML Workspace](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#workspace) is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models.\n",
"\n",
"Create an Azure ML Workspace within your Azure subscription or load an existing workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"\n",
"ws = Workspace.from_config()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Compute target setup\n",
"You will need to provide a [Compute Target](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#computes) that will be used for your AutoML model training. AutoML models for image tasks require [GPU SKUs](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-gpu) such as the ones from the NC, NCv2, NCv3, ND, NDv2 and NCasT4 series. We recommend using the NCsv3-series (with v100 GPUs) for faster training. Using a compute target with a multi-GPU VM SKU will leverage the multiple GPUs to speed up training. Additionally, setting up a compute target with multiple nodes will allow for faster model training by leveraging parallelism, when tuning hyperparameters for your model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"\n",
"cluster_name = \"gpu-cluster-nc6\"\n",
"\n",
"try:\n",
" compute_target = ws.compute_targets[cluster_name]\n",
" print(\"Found existing compute target.\")\n",
"except KeyError:\n",
" print(\"Creating a new compute target...\")\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"Standard_NC6\",\n",
" idle_seconds_before_scaledown=600,\n",
" min_nodes=0,\n",
" max_nodes=4,\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
"# Can poll for a minimum number of nodes and for a specific timeout.\n",
"# If no min_node_count is provided, it will use the scale settings for the cluster.\n",
"compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Experiment Setup\n",
"Create an [Experiment](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#experiments) in your workspace to track your model training runs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment_name = \"automl-image-multiclass\"\n",
"experiment = Experiment(ws, name=experiment_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Dataset with input Training Data\n",
"\n",
"In order to generate models for computer vision, you will need to bring in labeled image data as input for model training in the form of an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset). You can either use a dataset that you have exported from a [Data Labeling](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-label-data) project, or create a new Tabular Dataset with your labeled training data."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In this notebook, we use a toy dataset called Fridge Objects, which consists of 134 images of 4 classes of beverage container {can, carton, milk bottle, water bottle} photos taken on different backgrounds.\n",
"\n",
"All images in this notebook are hosted in [this repository](https://github.com/microsoft/computervision-recipes) and are made available under the [MIT license](https://github.com/microsoft/computervision-recipes/blob/master/LICENSE).\n",
"\n",
"We first download and unzip the data locally."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import urllib\n",
"from zipfile import ZipFile\n",
"\n",
"# download data\n",
"download_url = \"https://cvbp-secondary.z19.web.core.windows.net/datasets/image_classification/fridgeObjects.zip\"\n",
"data_file = \"./fridgeObjects.zip\"\n",
"urllib.request.urlretrieve(download_url, filename=data_file)\n",
"\n",
"# extract files\n",
"with ZipFile(data_file, \"r\") as zip:\n",
" print(\"extracting files...\")\n",
" zip.extractall()\n",
" print(\"done\")\n",
"# delete zip file\n",
"os.remove(data_file)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This is a sample image from this dataset:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import Image\n",
"\n",
"sample_image = \"./fridgeObjects/milk_bottle/99.jpg\"\n",
"Image(filename=sample_image)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Convert the downloaded data to JSONL\n",
"In this example, the fridge object dataset is stored in a directory. There are four different folders inside:\n",
"\n",
"- /water_bottle\n",
"- /milk_bottle\n",
"- /carton\n",
"- /can\n",
"\n",
"This is the most common data format for multiclass image classification. Each folder title corresponds to the image label for the images contained inside.\n",
"\n",
"In order to use this data to create an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset), we first need to convert it to the required JSONL format. Please refer to the [documentation on how to prepare datasets](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-prepare-datasets-for-automl-images).\n",
"\n",
"The following script is creating two .jsonl files (one for training and one for validation) in the parent folder of the dataset. The train / validation ratio corresponds to 20% of the data going into the validation file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"\n",
"src = \"./fridgeObjects/\"\n",
"train_validation_ratio = 5\n",
"\n",
"# Retrieving default datastore that got automatically created when we setup a workspace\n",
"workspaceblobstore = ws.get_default_datastore().name\n",
"\n",
"# Path to the training and validation files\n",
"train_annotations_file = os.path.join(src, \"train_annotations.jsonl\")\n",
"validation_annotations_file = os.path.join(src, \"validation_annotations.jsonl\")\n",
"\n",
"# sample json line dictionary\n",
"json_line_sample = {\n",
" \"image_url\": \"AmlDatastore://\"\n",
" + workspaceblobstore\n",
" + \"/\"\n",
" + os.path.basename(os.path.dirname(src)),\n",
" \"label\": \"\",\n",
"}\n",
"\n",
"index = 0\n",
"# Scan each sub directary and generate jsonl line\n",
"with open(train_annotations_file, \"w\") as train_f:\n",
" with open(validation_annotations_file, \"w\") as validation_f:\n",
" for className in os.listdir(src):\n",
" subDir = src + className\n",
" if not os.path.isdir(subDir):\n",
" continue\n",
" # Scan each subdirectory\n",
" print(\"Parsing \" + subDir)\n",
" for image in os.listdir(subDir):\n",
" json_line = dict(json_line_sample)\n",
" json_line[\"image_url\"] += f\"/{className}/{image}\"\n",
" json_line[\"label\"] = className\n",
"\n",
" if index % train_validation_ratio == 0:\n",
" # validation annotation\n",
" validation_f.write(json.dumps(json_line) + \"\\n\")\n",
" else:\n",
" # train annotation\n",
" train_f.write(json.dumps(json_line) + \"\\n\")\n",
" index += 1"
]
},
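{
"cell_type": "markdown",
"metadata": {},
"source": [
"Each generated line maps one image to its label. Assuming the default datastore is named `workspaceblobstore`, a line in `train_annotations.jsonl` looks like this (illustrative example):\n",
"\n",
"`{\"image_url\": \"AmlDatastore://workspaceblobstore/fridgeObjects/milk_bottle/99.jpg\", \"label\": \"milk_bottle\"}`"
]
},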
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Upload the JSONL file and images to Datastore\n",
"In order to use the data for training in Azure ML, we upload it to our Azure ML Workspace via a [Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#datasets-and-datastores). The datastore provides a mechanism for you to upload/download data and interact with it from your remote compute targets. It is an abstraction over Azure Storage."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Retrieving default datastore that got automatically created when we setup a workspace\n",
"ds = ws.get_default_datastore()\n",
"ds.upload(src_dir=\"./fridgeObjects\", target_path=\"fridgeObjects\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we need to create an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset) from the data we uploaded to the Datastore. We create one dataset for training and one for validation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"from azureml.data import DataType\n",
"\n",
"# get existing training dataset\n",
"training_dataset_name = \"fridgeObjectsTrainingDataset\"\n",
"if training_dataset_name in ws.datasets:\n",
" training_dataset = ws.datasets.get(training_dataset_name)\n",
" print(\"Found the training dataset\", training_dataset_name)\n",
"else:\n",
" # create training dataset\n",
" training_dataset = Dataset.Tabular.from_json_lines_files(\n",
" path=ds.path(\"fridgeObjects/train_annotations.jsonl\"),\n",
" set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n",
" )\n",
" training_dataset = training_dataset.register(\n",
" workspace=ws, name=training_dataset_name\n",
" )\n",
"# get existing validation dataset\n",
"validation_dataset_name = \"fridgeObjectsValidationDataset\"\n",
"if validation_dataset_name in ws.datasets:\n",
" validation_dataset = ws.datasets.get(validation_dataset_name)\n",
" print(\"Found the validation dataset\", validation_dataset_name)\n",
"else:\n",
" # create validation dataset\n",
" validation_dataset = Dataset.Tabular.from_json_lines_files(\n",
" path=ds.path(\"fridgeObjects/validation_annotations.jsonl\"),\n",
" set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n",
" )\n",
" validation_dataset = validation_dataset.register(\n",
" workspace=ws, name=validation_dataset_name\n",
" )\n",
"print(\"Training dataset name: \" + training_dataset.name)\n",
"print(\"Validation dataset name: \" + validation_dataset.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Validation dataset is optional. If no validation dataset is specified, by default 20% of your training data will be used for validation. You can control the percentage using the `split_ratio` argument - please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#model-agnostic-hyperparameters) for more details.\n",
"\n",
"This is what the training dataset looks like:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_dataset.to_pandas_dataframe()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Configuring your AutoML run for image tasks\n",
"AutoML allows you to easily train models for Image Classification, Object Detection & Instance Segmentation on your image data. You can control the model algorithm to be used, specify hyperparameter values for your model as well as perform a sweep across the hyperparameter space to generate an optimal model. Parameters for configuring your AutoML Image run are specified using the `AutoMLImageConfig` - please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#configure-your-experiment-settings) for the details on the parameters that can be used and their values."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When using AutoML for image tasks, you need to specify the model algorithms using the `model_name` parameter. You can either specify a single model or choose to sweep over multiple models. Please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#configure-model-algorithms-and-hyperparameters) for the list of supported model algorithms.\n",
"\n",
"### Using default hyperparameter values for the specified algorithm\n",
"Before doing a large sweep to search for the optimal models and hyperparameters, we recommend trying the default values for a given model to get a first baseline. Next, you can explore multiple hyperparameters for the same model before sweeping over multiple models and their parameters. This allows an iterative approach, as with multiple models and multiple hyperparameters for each (as we showcase in the next section), the search space grows exponentially, and you need more iterations to find optimal configurations.\n",
"\n",
"If you wish to use the default hyperparameter values for a given algorithm (say `vitb16r224`), you can specify the config for your AutoML Image runs as follows:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared.constants import ImageTask\n",
"from azureml.train.automl import AutoMLImageConfig\n",
"from azureml.train.hyperdrive import GridParameterSampling, choice\n",
"\n",
"image_config_vit = AutoMLImageConfig(\n",
" task=ImageTask.IMAGE_CLASSIFICATION,\n",
" compute_target=compute_target,\n",
" training_data=training_dataset,\n",
" validation_data=validation_dataset,\n",
" hyperparameter_sampling=GridParameterSampling({\"model_name\": choice(\"vitb16r224\")}),\n",
" iterations=1,\n",
")"
]
},
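{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an aside, recall from the dataset section that the validation dataset is optional. The following sketch configures a run without an explicit validation dataset, letting AutoML hold out 30% of the training data via `split_ratio` instead. This cell is a minimal, optional example (it assumes `split_ratio` can be fixed in the hyperparameter space alongside the model choice, per the model-agnostic hyperparameters documentation) and is not used by the rest of this tutorial:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional sketch: no validation_data is passed, and the model-agnostic\n",
"# split_ratio hyperparameter (assumed fixed here via a single choice) holds\n",
"# out 30% of the training data for validation.\n",
"from azureml.automl.core.shared.constants import ImageTask\n",
"from azureml.train.automl import AutoMLImageConfig\n",
"from azureml.train.hyperdrive import GridParameterSampling, choice\n",
"\n",
"image_config_split = AutoMLImageConfig(\n",
"    task=ImageTask.IMAGE_CLASSIFICATION,\n",
"    compute_target=compute_target,\n",
"    training_data=training_dataset,\n",
"    hyperparameter_sampling=GridParameterSampling(\n",
"        {\"model_name\": choice(\"vitb16r224\"), \"split_ratio\": choice(0.3)}\n",
"    ),\n",
"    iterations=1,\n",
")"
]
},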
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Submitting an AutoML run for Computer Vision tasks\n",
"Once you've created the config settings for your run, you can submit an AutoML run using the config in order to train a vision model using your training dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run = experiment.submit(image_config_vit)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run.wait_for_completion(wait_post_processing=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Hyperparameter sweeping for your AutoML models for computer vision tasks\n",
"In this example, we use the AutoMLImageConfig to train an Image Classification model using the following model algorithms: `seresnext`, `resnet50`, `vitb16r224`, and `vits16r224`.\n",
"\n",
"When using AutoML for Images, you can perform a hyperparameter sweep over a defined parameter space to find the optimal model. In this example, we sweep over the hyperparameters for each algorithm, choosing from a range of values for learning_rate, number_of_epochs, layers_to_freeze, etc., to generate a model with the optimal 'accuracy'. If hyperparameter values are not specified, then default values are used for the specified algorithm.\n",
"\n",
"We use Random Sampling to pick samples from this parameter space and try a total of 10 iterations with these different samples, running 2 iterations at a time on our compute target, which has been previously set up using 4 nodes. Please note that the more parameters the space has, the more iterations you need to find optimal models.\n",
"\n",
"We leverage the Bandit early termination policy which will terminate poor performing configs (those that are not within 20% slack of the best performing config), thus significantly saving compute resources.\n",
"\n",
"For more details on model and hyperparameter sweeping, please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared.constants import ImageTask\n",
"from azureml.train.automl import AutoMLImageConfig\n",
"from azureml.train.hyperdrive import BanditPolicy, RandomParameterSampling\n",
"from azureml.train.hyperdrive import choice, uniform\n",
"\n",
"parameter_space = {\n",
" \"learning_rate\": uniform(0.001, 0.01),\n",
" \"model\": choice(\n",
" {\n",
" \"model_name\": choice(\"vitb16r224\", \"vits16r224\"),\n",
" \"number_of_epochs\": choice(15, 30),\n",
" },\n",
" {\n",
" \"model_name\": choice(\"seresnext\", \"resnest50\"),\n",
" \"layers_to_freeze\": choice(0, 2),\n",
" },\n",
" ),\n",
"}\n",
"\n",
"tuning_settings = {\n",
" \"iterations\": 10,\n",
" \"max_concurrent_iterations\": 2,\n",
" \"hyperparameter_sampling\": RandomParameterSampling(parameter_space),\n",
" \"early_termination_policy\": BanditPolicy(\n",
" evaluation_interval=2, slack_factor=0.2, delay_evaluation=6\n",
" ),\n",
"}\n",
"\n",
"automl_image_config = AutoMLImageConfig(\n",
" task=ImageTask.IMAGE_CLASSIFICATION,\n",
" compute_target=compute_target,\n",
" training_data=training_dataset,\n",
" validation_data=validation_dataset,\n",
" **tuning_settings,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run = experiment.submit(automl_image_config)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run.wait_for_completion(wait_post_processing=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When doing a hyperparameter sweep, it can be useful to visualize the different configurations that were tried using the HyperDrive UI. You can navigate to this UI by going to the 'Child runs' tab in the UI of the main `automl_image_run` from above, which is the HyperDrive parent run. Then you can go into the 'Child runs' tab of this HyperDrive parent run. Alternatively, here below you can see directly the HyperDrive parent run and navigate to its 'Child runs' tab:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Run\n",
"\n",
"hyperdrive_run = Run(experiment=experiment, run_id=automl_image_run.id + \"_HD\")\n",
"hyperdrive_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Register the optimal vision model from the AutoML run\n",
"Once the run completes, we can register the model that was created from the best run (configuration that resulted in the best primary metric)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Register the model from the best run\n",
"\n",
"best_child_run = automl_image_run.get_best_child()\n",
"model_name = best_child_run.properties[\"model_name\"]\n",
"model = best_child_run.register_model(\n",
" model_name=model_name, model_path=\"outputs/model.pt\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploy model as a web service\n",
"Once you have your trained model, you can deploy the model on Azure. You can deploy your trained model as a web service on Azure Container Instances ([ACI](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-azure-container-instance)) or Azure Kubernetes Service ([AKS](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-azure-kubernetes-service)). Please note that ACI only supports small models under 1 GB in size. For testing larger models or for the high-scale production stage, we recommend using AKS.\n",
"In this tutorial, we will deploy the model as a web service in AKS."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You will need to first create an AKS compute cluster or use an existing AKS cluster. You can use either GPU or CPU VM SKUs for your deployment cluster"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AksCompute\n",
"from azureml.exceptions import ComputeTargetException\n",
"\n",
"# Choose a name for your cluster\n",
"aks_name = \"aks-cpu-mc\"\n",
"# Check to see if the cluster already exists\n",
"try:\n",
" aks_target = ComputeTarget(workspace=ws, name=aks_name)\n",
" print(\"Found existing compute target\")\n",
"except ComputeTargetException:\n",
" print(\"Creating a new compute target...\")\n",
" # Provision AKS cluster with a CPU machine\n",
" prov_config = AksCompute.provisioning_configuration(vm_size=\"STANDARD_D3_V2\")\n",
" # Create the cluster\n",
" aks_target = ComputeTarget.create(\n",
" workspace=ws, name=aks_name, provisioning_configuration=prov_config\n",
" )\n",
" aks_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Next, you will need to define the [inference configuration](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#update-inference-configuration), that describes how to set up the web-service containing your model. You can use the scoring script and the environment from the training run in your inference config.\n",
"\n",
"<b>Note:</b> To change the model's settings, open the downloaded scoring script and modify the model_settings variable <i>before</i> deploying the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.model import InferenceConfig\n",
"\n",
"best_child_run.download_file(\n",
" \"outputs/scoring_file_v_1_0_0.py\", output_file_path=\"score.py\"\n",
")\n",
"environment = best_child_run.get_environment()\n",
"inference_config = InferenceConfig(entry_script=\"score.py\", environment=environment)"
]
},
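{
"cell_type": "markdown",
"metadata": {},
"source": [
"The note above can also be applied programmatically. The sketch below rewrites the `model_settings` assignment in the downloaded `score.py` before deployment; it assumes the generated script defines a `model_settings = {}` line, and the `valid_resize_size` value is only an illustrative placeholder:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional sketch: patch model_settings in the downloaded scoring script.\n",
"# The replacement value below is an illustrative placeholder; adjust as needed.\n",
"with open(\"score.py\", \"r\") as f:\n",
"    script = f.read()\n",
"script = script.replace(\n",
"    \"model_settings = {}\", 'model_settings = {\"valid_resize_size\": 256}'\n",
")\n",
"with open(\"score.py\", \"w\") as f:\n",
"    f.write(script)"
]
},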
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can then deploy the model as an AKS web service."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Deploy the model from the best run as an AKS web service\n",
"from azureml.core.webservice import AksWebservice\n",
"from azureml.core.model import Model\n",
"\n",
"aks_config = AksWebservice.deploy_configuration(\n",
" autoscale_enabled=True, cpu_cores=1, memory_gb=5, enable_app_insights=True\n",
")\n",
"\n",
"aks_service = Model.deploy(\n",
" ws,\n",
" models=[model],\n",
" inference_config=inference_config,\n",
" deployment_config=aks_config,\n",
" deployment_target=aks_target,\n",
" name=\"automl-image-test-cpu-mc\",\n",
" overwrite=True,\n",
")\n",
"aks_service.wait_for_deployment(show_output=True)\n",
"print(aks_service.state)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the web service\n",
"Finally, let's test our deployed web service to predict new images. You can pass in any image. In this case, we'll use a random image from the dataset and pass it to the scoring URI."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"\n",
"# URL for the web service\n",
"scoring_uri = aks_service.scoring_uri\n",
"\n",
"# If the service is authenticated, set the key or token\n",
"key, _ = aks_service.get_keys()\n",
"\n",
"sample_image = \"./test_image.jpg\"\n",
"\n",
"# Load image data\n",
"data = open(sample_image, \"rb\").read()\n",
"\n",
"# Set the content type\n",
"headers = {\"Content-Type\": \"application/octet-stream\"}\n",
"\n",
"# If authentication is enabled, set the authorization header\n",
"headers[\"Authorization\"] = f\"Bearer {key}\"\n",
"\n",
"# Make the request and display the response\n",
"resp = requests.post(scoring_uri, data, headers=headers)\n",
"print(resp.text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Visualize predictions\n",
"Now that we have scored a test image, we can visualize the prediction for this image"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.image as mpimg\n",
"from PIL import Image\n",
"import numpy as np\n",
"import json\n",
"\n",
"IMAGE_SIZE = (18, 12)\n",
"plt.figure(figsize=IMAGE_SIZE)\n",
"img_np = mpimg.imread(sample_image)\n",
"img = Image.fromarray(img_np.astype(\"uint8\"), \"RGB\")\n",
"x, y = img.size\n",
"\n",
"fig, ax = plt.subplots(1, figsize=(15, 15))\n",
"# Display the image\n",
"ax.imshow(img_np)\n",
"\n",
"prediction = json.loads(resp.text)\n",
"label_index = np.argmax(prediction[\"probs\"])\n",
"label = prediction[\"labels\"][label_index]\n",
"conf_score = prediction[\"probs\"][label_index]\n",
"\n",
"display_text = \"{} ({})\".format(label, round(conf_score, 3))\n",
"print(display_text)\n",
"\n",
"color = \"red\"\n",
"plt.text(30, 30, display_text, color=color, fontsize=30)\n",
"\n",
"plt.show()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.10"
},
"nteract": {
"version": "nteract-front-end@1.0.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

Binary file not shown (added image, 272 KiB).


@@ -0,0 +1,15 @@
---
page_type: sample
languages:
- python
products:
- azure-machine-learning
description: Notebook showing how to use AutoML for training an Image Classification Multi-Label model. We use a small dataset to train the model, demonstrate how to tune the model's hyperparameters to optimize performance, and deploy the model for use in inference scenarios.
---
# Image Classification Multi-Label using AutoML for Images
- Dataset: Toy dataset with images of products found in a fridge
- **[Jupyter Notebook](auto-ml-image-classification-multilabel.ipynb)**
- train an Image Classification Multi-Label model using AutoML
- tune hyperparameters of the model to optimize model performance
- deploy the model to use in inference scenarios


@@ -0,0 +1,742 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License.\n",
"\n",
"# Training an Image Classification Multi-Label model using AutoML\n",
"In this notebook, we go over how you can use AutoML for training an Image Classification Multi-Label model. We will use a small dataset to train the model, demonstrate how you can tune hyperparameters of the model to optimize model performance and deploy the model to use in inference scenarios. For detailed information please refer to the [documentation of AutoML for Images](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![img](example_image_classification_multilabel_predictions.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Important:** This feature is currently in public preview. This preview version is provided without a service-level agreement. Certain features might not be supported or might have constrained capabilities. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/en-us/support/legal/preview-supplemental-terms/)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Environment Setup\n",
"Please follow the [\"Setup a new conda environment\"](https://github.com/Azure/azureml-examples/tree/main/python-sdk/tutorials/automl-with-azureml#3-setup-a-new-conda-environment) instructions to get started."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK.\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK.\")\n",
"assert (\n",
" azureml.core.VERSION >= \"1.35\"\n",
"), \"Please upgrade the Azure ML SDK by running '!pip install --upgrade azureml-sdk' then restart the kernel.\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Workspace setup\n",
"In order to train and deploy models in Azure ML, you will first need to set up a workspace.\n",
"\n",
"An [Azure ML Workspace](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#workspace) is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models.\n",
"\n",
"Create an Azure ML Workspace within your Azure subscription or load an existing workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"\n",
"ws = Workspace.from_config()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Compute target setup\n",
"You will need to provide a [Compute Target](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#computes) that will be used for your AutoML model training. AutoML models for image tasks require [GPU SKUs](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-gpu) such as the ones from the NC, NCv2, NCv3, ND, NDv2 and NCasT4 series. We recommend using the NCsv3-series (with v100 GPUs) for faster training. Using a compute target with a multi-GPU VM SKU will leverage the multiple GPUs to speed up training. Additionally, setting up a compute target with multiple nodes will allow for faster model training by leveraging parallelism, when tuning hyperparameters for your model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"\n",
"cluster_name = \"gpu-cluster-nc6\"\n",
"\n",
"try:\n",
" compute_target = ws.compute_targets[cluster_name]\n",
" print(\"Found existing compute target.\")\n",
"except KeyError:\n",
" print(\"Creating a new compute target...\")\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"Standard_NC6\",\n",
" idle_seconds_before_scaledown=600,\n",
" min_nodes=0,\n",
" max_nodes=4,\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
"# Can poll for a minimum number of nodes and for a specific timeout.\n",
"# If no min_node_count is provided, it will use the scale settings for the cluster.\n",
"compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Experiment Setup\n",
"Create an [Experiment](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#experiments) in your workspace to track your model training runs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment_name = \"automl-image-classification-multilabel\"\n",
"experiment = Experiment(ws, name=experiment_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Dataset with input Training Data\n",
"\n",
"In order to generate models for computer vision, you will need to bring in labeled image data as input for model training in the form of an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset). You can either use a dataset that you have exported from a [Data Labeling](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-label-data) project, or create a new Tabular Dataset with your labeled training data."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In this notebook, we use a toy dataset called Fridge Objects, which consists of 128 images of 4 labels of beverage container {can, carton, milk bottle, water bottle} photos taken on different backgrounds. It also includes a labels file in .csv format. This is one of the most common data formats for Image Classification Multi-Label: one csv file that contains the mapping of labels to a folder of images.\n",
"\n",
"All images in this notebook are hosted in [this repository](https://github.com/microsoft/computervision-recipes) and are made available under the [MIT license](https://github.com/microsoft/computervision-recipes/blob/master/LICENSE).\n",
"\n",
"We first download and unzip the data locally."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import urllib\n",
"from zipfile import ZipFile\n",
"\n",
"# download data\n",
"download_url = \"https://cvbp-secondary.z19.web.core.windows.net/datasets/image_classification/multilabelFridgeObjects.zip\"\n",
"data_file = \"./multilabelFridgeObjects.zip\"\n",
"urllib.request.urlretrieve(download_url, filename=data_file)\n",
"\n",
"# extract files\n",
"with ZipFile(data_file, \"r\") as zip:\n",
" print(\"extracting files...\")\n",
" zip.extractall()\n",
" print(\"done\")\n",
"# delete zip file\n",
"os.remove(data_file)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This is a sample image from this dataset:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import Image\n",
"\n",
"sample_image = \"./multilabelFridgeObjects/images/56.jpg\"\n",
"Image(filename=sample_image)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Convert the downloaded data to JSONL\n",
"In this example, the fridge object dataset is annotated in the CSV file, where each image corresponds to a line. It defines a mapping of the filename to the labels. Since this is a multi-label classification problem, each image can be associated to multiple labels. In order to use this data to create an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset), we first need to convert it to the required JSONL format. Please refer to the [documentation on how to prepare datasets](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-prepare-datasets-for-automl-images).\n",
"\n",
"The following script is creating two .jsonl files (one for training and one for validation) in the parent folder of the dataset. The train / validation ratio corresponds to 20% of the data going into the validation file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"\n",
"src = \"./multilabelFridgeObjects\"\n",
"train_validation_ratio = 5\n",
"\n",
"# Retrieving default datastore that got automatically created when we setup a workspace\n",
"workspaceblobstore = ws.get_default_datastore().name\n",
"\n",
"# Path to the labels file.\n",
"labelFile = os.path.join(src, \"labels.csv\")\n",
"\n",
"# Path to the training and validation files\n",
"train_annotations_file = os.path.join(src, \"train_annotations.jsonl\")\n",
"validation_annotations_file = os.path.join(src, \"validation_annotations.jsonl\")\n",
"\n",
"# sample json line dictionary\n",
"json_line_sample = {\n",
" \"image_url\": \"AmlDatastore://\" + workspaceblobstore + \"/multilabelFridgeObjects\",\n",
" \"label\": [],\n",
"}\n",
"\n",
"# Read each annotation and convert it to jsonl line\n",
"with open(train_annotations_file, \"w\") as train_f:\n",
" with open(validation_annotations_file, \"w\") as validation_f:\n",
" with open(labelFile, \"r\") as labels:\n",
" for i, line in enumerate(labels):\n",
" # Skipping the title line and any empty lines.\n",
" if i == 0 or len(line.strip()) == 0:\n",
" continue\n",
" line_split = line.strip().split(\",\")\n",
" if len(line_split) != 2:\n",
" print(\"Skipping the invalid line: {}\".format(line))\n",
" continue\n",
" json_line = dict(json_line_sample)\n",
" json_line[\"image_url\"] += f\"/images/{line_split[0]}\"\n",
" json_line[\"label\"] = line_split[1].strip().split(\" \")\n",
"\n",
" if i % train_validation_ratio == 0:\n",
" # validation annotation\n",
" validation_f.write(json.dumps(json_line) + \"\\n\")\n",
" else:\n",
" # train annotation\n",
" train_f.write(json.dumps(json_line) + \"\\n\")"
]
},
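{
"cell_type": "markdown",
"metadata": {},
"source": [
"Each generated line maps one image to its list of labels. Assuming the default datastore is named `workspaceblobstore`, a line in `train_annotations.jsonl` looks like this (illustrative example):\n",
"\n",
"`{\"image_url\": \"AmlDatastore://workspaceblobstore/multilabelFridgeObjects/images/56.jpg\", \"label\": [\"carton\", \"milk_bottle\"]}`"
]
},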
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Upload the JSONL file and images to Datastore\n",
"In order to use the data for training in Azure ML, we upload it to our Azure ML Workspace via a [Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#datasets-and-datastores). The datastore provides a mechanism for you to upload/download data and interact with it from your remote compute targets. It is an abstraction over Azure Storage."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Retrieving default datastore that got automatically created when we setup a workspace\n",
"ds = ws.get_default_datastore()\n",
"ds.upload(src_dir=\"./multilabelFridgeObjects\", target_path=\"multilabelFridgeObjects\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we need to create an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset) from the data we uploaded to the Datastore. We create one dataset for training and one for validation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"from azureml.data import DataType\n",
"\n",
"# get existing training dataset\n",
"training_dataset_name = \"multilabelFridgeObjectsTrainingDataset\"\n",
"if training_dataset_name in ws.datasets:\n",
" training_dataset = ws.datasets.get(training_dataset_name)\n",
" print(\"Found the training dataset\", training_dataset_name)\n",
"else:\n",
" # create training dataset\n",
" training_dataset = Dataset.Tabular.from_json_lines_files(\n",
" path=ds.path(\"multilabelFridgeObjects/train_annotations.jsonl\"),\n",
" set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n",
" )\n",
" training_dataset = training_dataset.register(\n",
" workspace=ws, name=training_dataset_name\n",
" )\n",
"# get existing validation dataset\n",
"validation_dataset_name = \"multilabelFridgeObjectsValidationDataset\"\n",
"if validation_dataset_name in ws.datasets:\n",
" validation_dataset = ws.datasets.get(validation_dataset_name)\n",
" print(\"Found the validation dataset\", validation_dataset_name)\n",
"else:\n",
" # create validation dataset\n",
" validation_dataset = Dataset.Tabular.from_json_lines_files(\n",
" path=ds.path(\"multilabelFridgeObjects/validation_annotations.jsonl\"),\n",
" set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n",
" )\n",
" validation_dataset = validation_dataset.register(\n",
" workspace=ws, name=validation_dataset_name\n",
" )\n",
"print(\"Training dataset name: \" + training_dataset.name)\n",
"print(\"Validation dataset name: \" + validation_dataset.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Validation dataset is optional. If no validation dataset is specified, by default 20% of your training data will be used for validation. You can control the percentage using the `split_ratio` argument - please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#model-agnostic-hyperparameters) for more details.\n",
"\n",
"This is what the training dataset looks like:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_dataset.to_pandas_dataframe()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Configuring your AutoML run for image tasks\n",
"AutoML allows you to easily train models for Image Classification, Object Detection & Instance Segmentation on your image data. You can control the model algorithm to be used, specify hyperparameter values for your model as well as perform a sweep across the hyperparameter space to generate an optimal model. Parameters for configuring your AutoML Image run are specified using the `AutoMLImageConfig` - please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#configure-your-experiment-settings) for the details on the parameters that can be used and their values."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When using AutoML for image tasks, you need to specify the model algorithms using the `model_name` parameter. You can either specify a single model or choose to sweep over multiple models. Please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#configure-model-algorithms-and-hyperparameters) for the list of supported model algorithms.\n",
"\n",
"### Using default hyperparameter values for the specified algorithm\n",
"Before doing a large sweep to search for the optimal models and hyperparameters, we recommend trying the default values for a given model to get a first baseline. Next, you can explore multiple hyperparameters for the same model before sweeping over multiple models and their parameters. This allows an iterative approach, as with multiple models and multiple hyperparameters for each (as we showcase in the next section), the search space grows exponentially, and you need more iterations to find optimal configurations.\n",
"\n",
"If you wish to use the default hyperparameter values for a given algorithm (say `vitb16r224`), you can specify the config for your AutoML Image runs as follows:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared.constants import ImageTask\n",
"from azureml.train.automl import AutoMLImageConfig\n",
"from azureml.train.hyperdrive import GridParameterSampling, choice\n",
"\n",
"image_config_vit = AutoMLImageConfig(\n",
" task=ImageTask.IMAGE_CLASSIFICATION_MULTILABEL,\n",
" compute_target=compute_target,\n",
" training_data=training_dataset,\n",
" validation_data=validation_dataset,\n",
" hyperparameter_sampling=GridParameterSampling({\"model_name\": choice(\"vitb16r224\")}),\n",
" iterations=1,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Submitting an AutoML run for Computer Vision tasks\n",
"Once you've created the config settings for your run, you can submit an AutoML run using the config in order to train a vision model using your training dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run = experiment.submit(image_config_vit)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run.wait_for_completion(wait_post_processing=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Hyperparameter sweeping for your AutoML models for computer vision tasks\n",
"In this example, we use the AutoMLImageConfig to train an Image Classification model using the `vitb16r224` and `seresnext` model algorithms.\n",
"\n",
"When using AutoML for Images, you can perform a hyperparameter sweep over a defined parameter space to find the optimal model. In this example, we sweep over the hyperparameters for each algorithm, choosing from a range of values for learning_rate, grad_accumulation_step, valid_resize_size, etc., to generate a model with the optimal 'accuracy'. If hyperparameter values are not specified, then default values are used for the specified algorithm.\n",
"\n",
"We use Random Sampling to pick samples from this parameter space and try a total of 10 iterations with these different samples, running 2 iterations at a time on our compute target, which has been previously set up using 4 nodes. Please note that the more parameters the space has, the more iterations you need to find optimal models.\n",
"\n",
"We leverage the Bandit early termination policy which will terminate poor performing configs (those that are not within 20% slack of the best performing config), thus significantly saving compute resources.\n",
"\n",
"For more details on model and hyperparameter sweeping, please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared.constants import ImageTask\n",
"from azureml.train.automl import AutoMLImageConfig\n",
"from azureml.train.hyperdrive import BanditPolicy, RandomParameterSampling\n",
"from azureml.train.hyperdrive import choice, uniform\n",
"\n",
"parameter_space = {\n",
" \"learning_rate\": uniform(0.005, 0.05),\n",
" \"model\": choice(\n",
" {\n",
" \"model_name\": choice(\"vitb16r224\"),\n",
" \"number_of_epochs\": choice(15, 30),\n",
" \"grad_accumulation_step\": choice(1, 2),\n",
" },\n",
" {\n",
" \"model_name\": choice(\"seresnext\"),\n",
" # model-specific: valid_resize_size must be greater than or equal to valid_crop_size\n",
" \"valid_resize_size\": choice(288, 320, 352),\n",
" \"valid_crop_size\": choice(224, 256), # model-specific\n",
" \"train_crop_size\": choice(224, 256), # model-specific\n",
" },\n",
" ),\n",
"}\n",
"\n",
"tuning_settings = {\n",
" \"iterations\": 10,\n",
" \"max_concurrent_iterations\": 2,\n",
" \"hyperparameter_sampling\": RandomParameterSampling(parameter_space),\n",
" \"early_termination_policy\": BanditPolicy(\n",
" evaluation_interval=2, slack_factor=0.2, delay_evaluation=6\n",
" ),\n",
"}\n",
"\n",
"automl_image_config = AutoMLImageConfig(\n",
" task=ImageTask.IMAGE_CLASSIFICATION_MULTILABEL,\n",
" compute_target=compute_target,\n",
" training_data=training_dataset,\n",
" validation_data=validation_dataset,\n",
" **tuning_settings,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run = experiment.submit(automl_image_config)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run.wait_for_completion(wait_post_processing=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When doing a hyperparameter sweep, it can be useful to visualize the different configurations that were tried using the HyperDrive UI. You can navigate to this UI by going to the 'Child runs' tab in the UI of the main `automl_image_run` from above, which is the HyperDrive parent run. Then you can go into the 'Child runs' tab of this HyperDrive parent run. Alternatively, here below you can see directly the HyperDrive parent run and navigate to its 'Child runs' tab:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Run\n",
"\n",
"hyperdrive_run = Run(experiment=experiment, run_id=automl_image_run.id + \"_HD\")\n",
"hyperdrive_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Register the optimal vision model from the AutoML run\n",
"Once the run completes, we can register the model that was created from the best run (configuration that resulted in the best primary metric)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Register the model from the best run\n",
"\n",
"best_child_run = automl_image_run.get_best_child()\n",
"model_name = best_child_run.properties[\"model_name\"]\n",
"model = best_child_run.register_model(\n",
" model_name=model_name, model_path=\"outputs/model.pt\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploy model as a web service\n",
"Once you have your trained model, you can deploy the model on Azure. You can deploy your trained model as a web service on Azure Container Instances ([ACI](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-azure-container-instance)) or Azure Kubernetes Service ([AKS](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-azure-kubernetes-service)). Please note that ACI only supports small models under 1 GB in size. For testing larger models or for the high-scale production stage, we recommend using AKS.\n",
"In this tutorial, we will deploy the model as a web service in AKS."
]
},
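{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, if your model is under 1 GB and you only need a quick test deployment, ACI is a lighter-weight alternative to AKS: you swap the deployment configuration and omit the deployment target. A minimal sketch follows (commented out so it is not run as part of this tutorial; the service name is an arbitrary example, and `inference_config` is defined a few cells below):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional ACI alternative for small models / quick testing (sketch only).\n",
"# from azureml.core.webservice import AciWebservice\n",
"# from azureml.core.model import Model\n",
"#\n",
"# aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=4)\n",
"# aci_service = Model.deploy(\n",
"#     ws,\n",
"#     models=[model],\n",
"#     inference_config=inference_config,\n",
"#     deployment_config=aci_config,\n",
"#     name=\"automl-image-test-aci-ml\",\n",
"#     overwrite=True,\n",
"# )\n",
"# aci_service.wait_for_deployment(show_output=True)"
]
},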
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You will need to first create an AKS compute cluster or use an existing AKS cluster. You can use either GPU or CPU VM SKUs for your deployment cluster"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AksCompute\n",
"from azureml.exceptions import ComputeTargetException\n",
"\n",
"# Choose a name for your cluster\n",
"aks_name = \"aks-cpu-ml\"\n",
"# Check to see if the cluster already exists\n",
"try:\n",
" aks_target = ComputeTarget(workspace=ws, name=aks_name)\n",
" print(\"Found existing compute target\")\n",
"except ComputeTargetException:\n",
" print(\"Creating a new compute target...\")\n",
" # Provision AKS cluster with a CPU machine\n",
" prov_config = AksCompute.provisioning_configuration(vm_size=\"STANDARD_D3_V2\")\n",
" # Create the cluster\n",
" aks_target = ComputeTarget.create(\n",
" workspace=ws, name=aks_name, provisioning_configuration=prov_config\n",
" )\n",
" aks_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Next, you will need to define the [inference configuration](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#update-inference-configuration), that describes how to set up the web-service containing your model. You can use the scoring script and the environment from the training run in your inference config.\n",
"\n",
"<b>Note:</b> To change the model's settings, open the downloaded scoring script and modify the model_settings variable <i>before</i> deploying the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.model import InferenceConfig\n",
"\n",
"best_child_run.download_file(\n",
" \"outputs/scoring_file_v_1_0_0.py\", output_file_path=\"score.py\"\n",
")\n",
"environment = best_child_run.get_environment()\n",
"inference_config = InferenceConfig(entry_script=\"score.py\", environment=environment)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can then deploy the model as an AKS web service."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Deploy the model from the best run as an AKS web service\n",
"from azureml.core.webservice import AksWebservice\n",
"from azureml.core.model import Model\n",
"\n",
"aks_config = AksWebservice.deploy_configuration(\n",
" autoscale_enabled=True, cpu_cores=1, memory_gb=5, enable_app_insights=True\n",
")\n",
"\n",
"aks_service = Model.deploy(\n",
" ws,\n",
" models=[model],\n",
" inference_config=inference_config,\n",
" deployment_config=aks_config,\n",
" deployment_target=aks_target,\n",
" name=\"automl-image-test-cpu-ml\",\n",
" overwrite=True,\n",
")\n",
"aks_service.wait_for_deployment(show_output=True)\n",
"print(aks_service.state)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the web service\n",
"Finally, let's test our deployed web service to predict new images. You can pass in any image. In this case, we'll use a random image from the dataset and pass it to the scoring URI."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"from IPython.display import Image\n",
"\n",
"# URL for the web service\n",
"scoring_uri = aks_service.scoring_uri\n",
"\n",
"# If the service is authenticated, set the key or token\n",
"key, _ = aks_service.get_keys()\n",
"\n",
"sample_image = \"./test_image.jpg\"\n",
"\n",
"# Load image data\n",
"data = open(sample_image, \"rb\").read()\n",
"\n",
"# Set the content type\n",
"headers = {\"Content-Type\": \"application/octet-stream\"}\n",
"\n",
"# If authentication is enabled, set the authorization header\n",
"headers[\"Authorization\"] = f\"Bearer {key}\"\n",
"\n",
"# Make the request and display the response\n",
"resp = requests.post(scoring_uri, data, headers=headers)\n",
"print(resp.text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Visualize predictions\n",
"Now that we have scored a test image, we can visualize the predictions for this image"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.image as mpimg\n",
"from PIL import Image\n",
"import json\n",
"\n",
"IMAGE_SIZE = (18, 12)\n",
"plt.figure(figsize=IMAGE_SIZE)\n",
"img_np = mpimg.imread(sample_image)\n",
"img = Image.fromarray(img_np.astype(\"uint8\"), \"RGB\")\n",
"x, y = img.size\n",
"\n",
"fig, ax = plt.subplots(1, figsize=(15, 15))\n",
"# Display the image\n",
"ax.imshow(img_np)\n",
"\n",
"prediction = json.loads(resp.text)\n",
"score_threshold = 0.5\n",
"\n",
"label_offset_x = 30\n",
"label_offset_y = 30\n",
"for index, score in enumerate(prediction[\"probs\"]):\n",
" if score > score_threshold:\n",
" label = prediction[\"labels\"][index]\n",
" display_text = \"{} ({})\".format(label, round(score, 3))\n",
" print(display_text)\n",
"\n",
" color = \"red\"\n",
" plt.text(label_offset_x, label_offset_y, display_text, color=color, fontsize=30)\n",
" label_offset_y += 30\n",
"plt.show()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.10"
},
"nteract": {
"version": "nteract-front-end@1.0.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

Binary file not shown (added image, 160 KiB).

Some files were not shown because too many files have changed in this diff.