Compare commits

...

68 Commits

Author SHA1 Message Date
amlrelsa-ms
71e061b193 update samples from Release-114 as a part of 1.38.0 SDK stable release 2022-02-16 16:32:55 +00:00
Harneet Virk
9094da4085 Merge pull request #1684 from Azure/release_update/Release-122
update samples from Release-122 as a part of  SDK release
2022-02-14 11:38:49 -08:00
amlrelsa-ms
ebf9d2855c update samples from Release-122 as a part of SDK release 2022-02-14 19:24:27 +00:00
v-pbavanari
1bbd78eb33 update samples from Release-121 as a part of SDK release (#1678)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2022-02-02 12:28:49 -05:00
v-pbavanari
77f5a69e04 update samples from Release-120 as a part of SDK release (#1676)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2022-01-28 12:51:49 -05:00
raja7592
ce82af2ab0 update samples from Release-118 as a part of SDK release (#1673)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2022-01-24 20:07:35 -05:00
Harneet Virk
2a2d2efa17 Merge pull request #1658 from Azure/release_update/Release-117
Update samples from Release sdk 1.37.0 as a part of  SDK release
2021-12-13 10:36:08 -08:00
amlrelsa-ms
dd494e9cac update samples from Release-117 as a part of SDK release 2021-12-13 16:57:22 +00:00
Harneet Virk
352adb7487 Merge pull request #1629 from Azure/release_update/Release-116
Update samples from Release as a part of SDK release 1.36.0
2021-11-08 09:48:25 -08:00
amlrelsa-ms
aebe34b4e8 update samples from Release-116 as a part of SDK release 2021-11-08 16:09:41 +00:00
Harneet Virk
c7e1241e20 Merge pull request #1612 from Azure/release_update/Release-115
Update samples from Release-115 as a part of  SDK release
2021-10-11 12:01:59 -07:00
amlrelsa-ms
6529298c24 update samples from Release-115 as a part of SDK release 2021-10-11 16:09:57 +00:00
Harneet Virk
e2dddfde85 Merge pull request #1601 from Azure/release_update/Release-114
update samples from Release-114 as a part of  SDK release
2021-09-29 14:21:59 -07:00
amlrelsa-ms
36d96f96ec update samples from Release-114 as a part of SDK release 2021-09-29 20:16:51 +00:00
Harneet Virk
7ebcfea5a3 Merge pull request #1600 from Azure/release_update/Release-113
update samples from Release-113 as a part of  SDK release
2021-09-28 12:53:57 -07:00
amlrelsa-ms
b20bfed33a update samples from Release-113 as a part of SDK release 2021-09-28 19:44:58 +00:00
Harneet Virk
a66a92e338 Merge pull request #1597 from Azure/release_update/Release-112
update samples from Release-112 as a part of  SDK release
2021-09-24 14:44:53 -07:00
amlrelsa-ms
c56c2c3525 update samples from Release-112 as a part of SDK release 2021-09-24 21:40:44 +00:00
Harneet Virk
4cac072fa4 Merge pull request #1588 from Azure/release_update/Release-111
Update samples from Release-111 as a part of SDK 1.34.0 release
2021-09-09 09:02:38 -07:00
amlrelsa-ms
aeab6b3e28 update samples from Release-111 as a part of SDK release 2021-09-07 17:32:15 +00:00
Harneet Virk
015e261f29 Merge pull request #1581 from Azure/release_update/Release-110
update samples from Release-110 as a part of  SDK release
2021-08-20 09:21:08 -07:00
amlrelsa-ms
d2a423dde9 update samples from Release-110 as a part of SDK release 2021-08-20 00:28:42 +00:00
Harneet Virk
3ecbfd6532 Merge pull request #1578 from Azure/release_update/Release-109
update samples from Release-109 as a part of  SDK release
2021-08-18 18:16:31 -07:00
amlrelsa-ms
02ecb2d755 update samples from Release-109 as a part of SDK release 2021-08-18 22:07:12 +00:00
Harneet Virk
122df6e846 Merge pull request #1576 from Azure/release_update/Release-108
update samples from Release-108 as a part of  SDK release
2021-08-18 09:47:34 -07:00
amlrelsa-ms
7d6a0a2051 update samples from Release-108 as a part of SDK release 2021-08-18 00:33:54 +00:00
Harneet Virk
6cc8af80a2 Merge pull request #1565 from Azure/release_update/Release-107
update samples from Release-107 as a part of  SDK release 1.33
2021-08-02 13:14:30 -07:00
amlrelsa-ms
f61898f718 update samples from Release-107 as a part of SDK release 2021-08-02 18:01:38 +00:00
Harneet Virk
5cb465171e Merge pull request #1556 from Azure/update-spark-notebook
updating spark notebook
2021-07-26 17:09:42 -07:00
Shivani Santosh Sambare
0ce37dd18f updating spark notebook 2021-07-26 15:51:54 -07:00
Cody
d835b183a5 update README.md (#1552) 2021-07-15 10:43:22 -07:00
Cody
d3cafebff9 add code of conduct (#1551) 2021-07-15 08:08:44 -07:00
Harneet Virk
354b194a25 Merge pull request #1543 from Azure/release_update/Release-106
update samples from Release-106 as a part of  SDK release
2021-07-06 11:05:55 -07:00
amlrelsa-ms
a52d67bb84 update samples from Release-106 as a part of SDK release 2021-07-06 17:17:27 +00:00
Harneet Virk
421ea3d920 Merge pull request #1530 from Azure/release_update/Release-105
update samples from Release-105 as a part of  SDK release
2021-06-25 09:58:05 -07:00
amlrelsa-ms
24f53f1aa1 update samples from Release-105 as a part of SDK release 2021-06-24 23:00:13 +00:00
Harneet Virk
6fc5d11de2 Merge pull request #1518 from Azure/release_update/Release-104
update samples from Release-104 as a part of  SDK release
2021-06-21 10:29:53 -07:00
amlrelsa-ms
d17547d890 update samples from Release-104 as a part of SDK release 2021-06-21 17:16:09 +00:00
Harneet Virk
928e0d4327 Merge pull request #1510 from Azure/release_update/Release-103
update samples from Release-103 as a part of  SDK release
2021-06-14 10:33:34 -07:00
amlrelsa-ms
05327cfbb9 update samples from Release-103 as a part of SDK release 2021-06-14 17:30:30 +00:00
Harneet Virk
8f7717014b Merge pull request #1506 from Azure/release_update/Release-102
update samples from Release-102 as a part of  SDK release 1.30.0
2021-06-07 11:14:02 -07:00
amlrelsa-ms
a47e50b79a update samples from Release-102 as a part of SDK release 2021-06-07 17:34:51 +00:00
Harneet Virk
8f89d88def Merge pull request #1505 from Azure/release_update/Release-101
update samples from Release-101 as a part of  SDK release
2021-06-04 19:54:53 -07:00
amlrelsa-ms
ec97207bb1 update samples from Release-101 as a part of SDK release 2021-06-05 02:54:13 +00:00
Harneet Virk
a2d20b0f47 Merge pull request #1493 from Azure/release_update/Release-98
update samples from Release-98 as a part of  SDK release
2021-05-28 08:04:58 -07:00
amlrelsa-ms
8180cebd75 update samples from Release-98 as a part of SDK release 2021-05-28 03:44:25 +00:00
Harneet Virk
700ab2d782 Merge pull request #1489 from Azure/release_update/Release-97
update samples from Release-97 as a part of  SDK  1.29.0 release
2021-05-25 07:43:14 -07:00
amlrelsa-ms
ec9a5a061d update samples from Release-97 as a part of SDK release 2021-05-24 17:39:23 +00:00
Harneet Virk
467630f955 Merge pull request #1466 from Azure/release_update/Release-96
update samples from Release-96 as a part of  SDK release 1.28.0
2021-05-10 22:48:19 -07:00
amlrelsa-ms
eac6b69bae update samples from Release-96 as a part of SDK release 2021-05-10 18:38:34 +00:00
Harneet Virk
441a5b0141 Merge pull request #1440 from Azure/release_update/Release-95
update samples from Release-95 as a part of  SDK 1.27 release
2021-04-19 11:51:21 -07:00
amlrelsa-ms
70902df6da update samples from Release-95 as a part of SDK release 2021-04-19 18:42:58 +00:00
nikAI77
6f893ff0b4 update samples from Release-94 as a part of SDK release (#1418)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2021-04-06 12:36:12 -04:00
Harneet Virk
bda592a236 Merge pull request #1406 from Azure/release_update/Release-93
update samples from Release-93 as a part of  SDK release
2021-03-24 11:25:00 -07:00
amlrelsa-ms
8b32e8d5ad update samples from Release-93 as a part of SDK release 2021-03-24 16:45:36 +00:00
Harneet Virk
54a065c698 Merge pull request #1386 from yunjie-hub/master
Add synapse sample notebooks
2021-03-09 18:05:10 -08:00
yunjie-hub
b9718678b3 Add files via upload 2021-03-09 18:02:27 -08:00
Harneet Virk
3fa40d2c6d Merge pull request #1385 from Azure/release_update/Release-92
update samples from Release-92 as a part of  SDK release
2021-03-09 17:51:27 -08:00
amlrelsa-ms
883e4a4c59 update samples from Release-92 as a part of SDK release 2021-03-10 01:48:54 +00:00
Harneet Virk
e90826b331 Merge pull request #1384 from yunjie-hub/master
Add synapse sample notebooks
2021-03-09 12:40:33 -08:00
yunjie-hub
ac04172f6d Add files via upload 2021-03-09 12:38:23 -08:00
Harneet Virk
8c0000beb4 Merge pull request #1382 from Azure/release_update/Release-91
update samples from Release-91 as a part of  SDK release
2021-03-08 21:43:10 -08:00
amlrelsa-ms
35287ab0d8 update samples from Release-91 as a part of SDK release 2021-03-09 05:36:08 +00:00
Harneet Virk
3fe4f8b038 Merge pull request #1375 from Azure/release_update/Release-90
update samples from Release-90 as a part of  SDK release
2021-03-01 09:15:14 -08:00
amlrelsa-ms
1722678469 update samples from Release-90 as a part of SDK release 2021-03-01 17:13:25 +00:00
Harneet Virk
17da7e8706 Merge pull request #1364 from Azure/release_update/Release-89
update samples from Release-89 as a part of  SDK release
2021-02-23 17:27:27 -08:00
amlrelsa-ms
d2e7213ff3 update samples from Release-89 as a part of SDK release 2021-02-24 01:26:17 +00:00
mx-iao
882cb76e8a Merge pull request #1361 from Azure/minxia/distr-pytorch
Update distributed pytorch example
2021-02-23 12:07:20 -08:00
392 changed files with 66001 additions and 21611 deletions

9
CODE_OF_CONDUCT.md Normal file
View File

@@ -0,0 +1,9 @@
# Microsoft Open Source Code of Conduct
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
Resources:
- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns

View File

@@ -1,77 +1,43 @@
# Azure Machine Learning service example notebooks
# Azure Machine Learning Python SDK notebooks
> a community-driven repository of examples using mlflow for tracking can be found at https://github.com/Azure/azureml-examples
This repository contains example notebooks demonstrating the [Azure Machine Learning](https://azure.microsoft.com/services/machine-learning-service/) Python SDK which allows you to build, train, deploy and manage machine learning solutions using Azure. The AML SDK allows you the choice of using local or cloud compute resources, while managing and maintaining the complete data science workflow from the cloud.
Welcome to the Azure Machine Learning Python SDK notebooks repository!
![Azure ML Workflow](https://raw.githubusercontent.com/MicrosoftDocs/azure-docs/master/articles/machine-learning/media/concept-azure-machine-learning-architecture/workflow.png)
## Getting started
These notebooks are recommended for use in an Azure Machine Learning [Compute Instance](https://docs.microsoft.com/azure/machine-learning/concept-compute-instance), where you can run them without any additional set up.
## Quick installation
```sh
pip install azureml-sdk
```
Read more detailed instructions on [how to set up your environment](./NBSETUP.md) using Azure Notebook service, your own Jupyter notebook server, or Docker.
However, the notebooks can be run in any development environment with the correct `azureml` packages installed.
## How to navigate and use the example notebooks?
If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, you should always run the [Configuration](./configuration.ipynb) notebook first when setting up a notebook library on a new machine or in a new environment. It configures your notebook library to connect to an Azure Machine Learning workspace, and sets up your workspace and compute to be used by many of the other examples.
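Once the Configuration notebook has written its `config.json`, connecting to the workspace from any notebook takes a couple of lines. A minimal sketch (assuming the `config.json` sits alongside the notebook or in a parent directory):

```python
# Minimal sketch: connect to an existing Azure ML workspace.
# Assumes a config.json written by the Configuration notebook (or downloaded
# from the Azure portal) is in the current directory or a parent directory.
from azureml.core import Workspace

ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, sep="\n")
```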
This [index](./index.md) should assist in navigating the Azure Machine Learning notebook samples and encourage efficient retrieval of topics and content.
If you want to...
* ...try out and explore Azure ML, start with image classification tutorials: [Part 1 (Training)](./tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb) and [Part 2 (Deployment)](./tutorials/image-classification-mnist-data/img-classification-part2-deploy.ipynb).
* ...learn about experimentation and tracking run history: [track and monitor experiments](./how-to-use-azureml/track-and-monitor-experiments).
* ...train deep learning models at scale, first learn about [Machine Learning Compute](./how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb), and then try [distributed hyperparameter tuning](./how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) and [distributed training](./how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb).
* ...deploy models as a realtime scoring service, first learn the basics by [deploying to Azure Container Instance](./how-to-use-azureml/deployment/deploy-to-cloud/model-register-and-deploy.ipynb), then learn how to [production deploy models on Azure Kubernetes Cluster](./how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb).
* ...deploy models as a batch scoring service: [create Machine Learning Compute for scoring compute](./how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb) and [use Machine Learning Pipelines to deploy your model](https://aka.ms/pl-batch-scoring).
* ...monitor your deployed models, learn about using [App Insights](./how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb).
## Tutorials
The [Tutorials](./tutorials) folder contains notebooks for the tutorials described in the [Azure Machine Learning documentation](https://aka.ms/aml-docs).
## How to use Azure ML
The [How to use Azure ML](./how-to-use-azureml) folder contains specific examples demonstrating the features of the Azure Machine Learning SDK
- [Training](./how-to-use-azureml/training) - Examples of how to build models using Azure ML's logging and execution capabilities on local and remote compute targets
- [Training with ML and DL frameworks](./how-to-use-azureml/ml-frameworks) - Examples demonstrating how to build and train machine learning models at scale on Azure ML and perform hyperparameter tuning.
- [Manage Azure ML Service](./how-to-use-azureml/manage-azureml-service) - Examples of how to perform tasks, such as authenticating against the Azure ML service in different ways.
- [Automated Machine Learning](./how-to-use-azureml/automated-machine-learning) - Examples using Automated Machine Learning to automatically generate optimal machine learning pipelines and models
- [Machine Learning Pipelines](./how-to-use-azureml/machine-learning-pipelines) - Examples showing how to create and use reusable pipelines for training and batch scoring
- [Deployment](./how-to-use-azureml/deployment) - Examples showing how to deploy and manage machine learning models and solutions
- [Azure Databricks](./how-to-use-azureml/azure-databricks) - Examples showing how to use Azure ML with Azure Databricks
- [Reinforcement Learning](./how-to-use-azureml/reinforcement-learning) - Examples showing how to train reinforcement learning agents
---
## Documentation
* Quickstarts, end-to-end tutorials, and how-tos on the [official documentation site for Azure Machine Learning service](https://docs.microsoft.com/en-us/azure/machine-learning/service/).
* [Python SDK reference](https://docs.microsoft.com/en-us/python/api/overview/azure/ml/intro?view=azure-ml-py)
* Azure ML Data Prep SDK [overview](https://aka.ms/data-prep-sdk), [Python SDK reference](https://aka.ms/aml-data-prep-apiref), and [tutorials and how-tos](https://aka.ms/aml-data-prep-notebooks).
---
## Community Repository
Visit this [community repository](https://github.com/microsoft/MLOps/tree/master/examples) to find useful end-to-end sample notebooks. Also, please follow these [contribution guidelines](https://github.com/microsoft/MLOps/blob/master/contributing.md) when contributing to this repository.
## Projects using Azure Machine Learning
Visit the following repos to see projects contributed by Azure ML users:
- [Learn about Natural Language Processing best practices using Azure Machine Learning service](https://github.com/microsoft/nlp)
- [Pre-Train BERT models using Azure Machine Learning service](https://github.com/Microsoft/AzureML-BERT)
- [Fashion MNIST with Azure ML SDK](https://github.com/amynic/azureml-sdk-fashion)
- [UMass Amherst Student Samples](https://github.com/katiehouse3/microsoft-azure-ml-notebooks) - A number of end-to-end machine learning notebooks, including machine translation, image classification, and customer churn, created by students in the 696DS course at UMass Amherst.
## Data/Telemetry
This repository collects usage data and sends it to Microsoft to help improve our products and services. Read Microsoft's [privacy statement to learn more](https://privacy.microsoft.com/en-US/privacystatement)
To opt out of tracking, please go to the raw markdown or .ipynb files and remove the following line of code:
Install the `azureml.core` Python package:
```sh
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/README.png)"
pip install azureml-core
```
This URL will be slightly different depending on the file.
![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/README.png)
Install additional packages as needed:
```sh
pip install azureml-mlflow
pip install azureml-dataset-runtime
pip install azureml-automl-runtime
pip install azureml-pipeline
pip install azureml-pipeline-steps
...
```
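To confirm which SDK version ended up in your environment, the same check used at the top of the sample notebooks works from any Python prompt:

```python
# Print the installed Azure ML SDK version (mirrors the version cell in the notebooks).
import azureml.core

print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
```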
We recommend starting with one of the [quickstarts](tutorials/compute-instance-quickstarts).
## Contributing
This repository is a push-only mirror. Pull requests are ignored.
## Code of Conduct
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). Please see the [code of conduct](CODE_OF_CONDUCT.md) for details.
## Reference
- [Documentation](https://docs.microsoft.com/azure/machine-learning)

View File

@@ -103,7 +103,7 @@
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -254,6 +254,8 @@
"\n",
"Many of the sample notebooks use Azure ML managed compute (AmlCompute) to train models using a dynamically scalable pool of compute. In this section you will create default compute clusters for use by the other notebooks and any other operations you choose.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"To create a cluster, you need to specify a compute configuration that specifies the type of machine to be used and the scalability behaviors. Then you choose a name for the cluster that is unique within the workspace that can be used to address the cluster later.\n",
"\n",
"The cluster parameters are:\n",

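As an aside, the provisioning pattern these paragraphs describe boils down to roughly the following minimal sketch, which the AutoML notebook further down uses almost verbatim; the cluster name, VM size, and node count are illustrative, and `ws` is the workspace object from `Workspace.from_config()`:

```python
# Minimal sketch: create an AmlCompute cluster, or reuse it if it already exists.
# `ws` is an azureml.core.Workspace; the name, VM size, and max_nodes are illustrative.
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.core.compute_target import ComputeTargetException

cpu_cluster_name = "cpu-cluster"
try:
    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)
    print("Found existing cluster, using it.")
except ComputeTargetException:
    compute_config = AmlCompute.provisioning_configuration(vm_size="STANDARD_DS12_V2",
                                                           max_nodes=4)
    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)
compute_target.wait_for_completion(show_output=True)
```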
View File

@@ -36,9 +36,9 @@
"\n",
"<a id=\"Introduction\"></a>\n",
"## Introduction\n",
"This notebook shows how to use [Fairlearn (an open source fairness assessment and unfairness mitigation package)](http://fairlearn.github.io) and Azure Machine Learning Studio for a binary classification problem. This example uses the well-known adult census dataset. For the purposes of this notebook, we shall treat this as a loan decision problem. We will pretend that the label indicates whether or not each individual repaid a loan in the past. We will use the data to train a predictor to predict whether previously unseen individuals will repay a loan or not. The assumption is that the model predictions are used to decide whether an individual should be offered a loan. Its purpose is purely illustrative of a workflow including a fairness dashboard - in particular, we do **not** include a full discussion of the detailed issues which arise when considering fairness in machine learning. For such discussions, please [refer to the Fairlearn website](http://fairlearn.github.io/).\n",
"This notebook shows how to use [Fairlearn (an open source fairness assessment and unfairness mitigation package)](http://fairlearn.org) and Azure Machine Learning Studio for a binary classification problem. This example uses the well-known adult census dataset. For the purposes of this notebook, we shall treat this as a loan decision problem. We will pretend that the label indicates whether or not each individual repaid a loan in the past. We will use the data to train a predictor to predict whether previously unseen individuals will repay a loan or not. The assumption is that the model predictions are used to decide whether an individual should be offered a loan. Its purpose is purely illustrative of a workflow including a fairness dashboard - in particular, we do **not** include a full discussion of the detailed issues which arise when considering fairness in machine learning. For such discussions, please [refer to the Fairlearn website](http://fairlearn.org/).\n",
"\n",
"We will apply the [grid search algorithm](https://fairlearn.github.io/master/api_reference/fairlearn.reductions.html#fairlearn.reductions.GridSearch) from the Fairlearn package using a specific notion of fairness called Demographic Parity. This produces a set of models, and we will view these in a dashboard both locally and in the Azure Machine Learning Studio.\n",
"We will apply the [grid search algorithm](https://fairlearn.org/v0.4.6/api_reference/fairlearn.reductions.html#fairlearn.reductions.GridSearch) from the Fairlearn package using a specific notion of fairness called Demographic Parity. This produces a set of models, and we will view these in a dashboard both locally and in the Azure Machine Learning Studio.\n",
"\n",
"### Setup\n",
"\n",
@@ -46,9 +46,10 @@
"Please see the [configuration notebook](../../configuration.ipynb) for information about creating one, if required.\n",
"This notebook also requires the following packages:\n",
"* `azureml-contrib-fairness`\n",
"* `fairlearn==0.4.6` (v0.5.0 will work with minor modifications)\n",
"* `fairlearn>=0.6.2` (pre-v0.5.0 will work with minor modifications)\n",
"* `joblib`\n",
"* `shap`\n",
"* `liac-arff`\n",
"* `raiwidgets~=0.7.0`\n",
"\n",
"Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell:"
]
@@ -85,10 +86,9 @@
"outputs": [],
"source": [
"from fairlearn.reductions import GridSearch, DemographicParity, ErrorRate\n",
"from fairlearn.widget import FairlearnDashboard\n",
"from raiwidgets import FairnessDashboard\n",
"\n",
"from sklearn.compose import ColumnTransformer\n",
"from sklearn.datasets import fetch_openml\n",
"from sklearn.impute import SimpleImputer\n",
"from sklearn.linear_model import LogisticRegression\n",
"from sklearn.model_selection import train_test_split\n",
@@ -112,9 +112,9 @@
"metadata": {},
"outputs": [],
"source": [
"from fairness_nb_utils import fetch_openml_with_retries\n",
"from fairness_nb_utils import fetch_census_dataset\n",
"\n",
"data = fetch_openml_with_retries(data_id=1590)\n",
"data = fetch_census_dataset()\n",
" \n",
"# Extract the items we want\n",
"X_raw = data.data\n",
@@ -137,7 +137,7 @@
"outputs": [],
"source": [
"A = X_raw[['sex','race']]\n",
"X_raw = X_raw.drop(labels=['sex', 'race'],axis = 1)"
"X_raw = X_raw.drop(labels=['sex', 'race'], axis = 1)"
]
},
{
@@ -257,9 +257,9 @@
"metadata": {},
"outputs": [],
"source": [
"FairlearnDashboard(sensitive_features=A_test, sensitive_feature_names=['Sex', 'Race'],\n",
" y_true=y_test,\n",
" y_pred={\"unmitigated\": unmitigated_predictor.predict(X_test)})"
"FairnessDashboard(sensitive_features=A_test,\n",
" y_true=y_test,\n",
" y_pred={\"unmitigated\": unmitigated_predictor.predict(X_test)})"
]
},
{
@@ -312,8 +312,8 @@
"sweep.fit(X_train, y_train,\n",
" sensitive_features=A_train.sex)\n",
"\n",
"# For Fairlearn v0.5.0, need sweep.predictors_\n",
"predictors = sweep._predictors"
"# For Fairlearn pre-v0.5.0, need sweep._predictors\n",
"predictors = sweep.predictors_"
]
},
{
@@ -330,16 +330,14 @@
"outputs": [],
"source": [
"errors, disparities = [], []\n",
"for m in predictors:\n",
" classifier = lambda X: m.predict(X)\n",
" \n",
"for predictor in predictors:\n",
" error = ErrorRate()\n",
" error.load_data(X_train, pd.Series(y_train), sensitive_features=A_train.sex)\n",
" disparity = DemographicParity()\n",
" disparity.load_data(X_train, pd.Series(y_train), sensitive_features=A_train.sex)\n",
" \n",
" errors.append(error.gamma(classifier)[0])\n",
" disparities.append(disparity.gamma(classifier).max())\n",
" errors.append(error.gamma(predictor.predict)[0])\n",
" disparities.append(disparity.gamma(predictor.predict).max())\n",
" \n",
"all_results = pd.DataFrame( {\"predictor\": predictors, \"error\": errors, \"disparity\": disparities})\n",
"\n",
@@ -388,10 +386,9 @@
"metadata": {},
"outputs": [],
"source": [
"FairlearnDashboard(sensitive_features=A_test, \n",
" sensitive_feature_names=['Sex', 'Race'],\n",
" y_true=y_test.tolist(),\n",
" y_pred=predictions_dominant)"
"FairnessDashboard(sensitive_features=A_test, \n",
" y_true=y_test.tolist(),\n",
" y_pred=predictions_dominant)"
]
},
{
@@ -410,7 +407,7 @@
"<a id=\"AzureUpload\"></a>\n",
"## Uploading a Fairness Dashboard to Azure\n",
"\n",
"Uploading a fairness dashboard to Azure is a two stage process. The `FairlearnDashboard` invoked in the previous section relies on the underlying Python kernel to compute metrics on demand. This is obviously not available when the fairness dashboard is rendered in AzureML Studio. By default, the dashboard in Azure Machine Learning Studio also requires the models to be registered. The required stages are therefore:\n",
"Uploading a fairness dashboard to Azure is a two stage process. The `FairnessDashboard` invoked in the previous section relies on the underlying Python kernel to compute metrics on demand. This is obviously not available when the fairness dashboard is rendered in AzureML Studio. By default, the dashboard in Azure Machine Learning Studio also requires the models to be registered. The required stages are therefore:\n",
"1. Register the dominant models\n",
"1. Precompute all the required metrics\n",
"1. Upload to Azure\n",
@@ -584,7 +581,7 @@
"<a id=\"Conclusion\"></a>\n",
"## Conclusion\n",
"\n",
"In this notebook we have demonstrated how to use the `GridSearch` algorithm from Fairlearn to generate a collection of models, and then present them in the fairness dashboard in Azure Machine Learning Studio. Please remember that this notebook has not attempted to discuss the many considerations which should be part of any approach to unfairness mitigation. The [Fairlearn website](http://fairlearn.github.io/) provides that discussion"
"In this notebook we have demonstrated how to use the `GridSearch` algorithm from Fairlearn to generate a collection of models, and then present them in the fairness dashboard in Azure Machine Learning Studio. Please remember that this notebook has not attempted to discuss the many considerations which should be part of any approach to unfairness mitigation. The [Fairlearn website](http://fairlearn.org/) provides that discussion"
]
},
{
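As an aside on the migration above (from `sweep._predictors` to the public `sweep.predictors_`), the mitigation sweep these notebooks run reduces to roughly the following self-contained sketch with `fairlearn>=0.6.2`; the synthetic data, estimator, and grid size are illustrative stand-ins for the census data used in the notebook:

```python
# Minimal, self-contained sketch of the Fairlearn GridSearch mitigation used above.
# Assumes fairlearn>=0.6.2; the synthetic data stands in for the adult census dataset.
import numpy as np
import pandas as pd
from fairlearn.reductions import DemographicParity, GridSearch
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
X_train = pd.DataFrame(rng.normal(size=(200, 3)), columns=["f0", "f1", "f2"])
y_train = (X_train["f0"] + rng.normal(size=200) > 0).astype(int)
sex = pd.Series(rng.choice(["Female", "Male"], size=200), name="sex")

sweep = GridSearch(LogisticRegression(solver="liblinear"),
                   constraints=DemographicParity(),
                   grid_size=10)
sweep.fit(X_train, y_train, sensitive_features=sex)

predictors = sweep.predictors_  # public attribute in fairlearn>=0.5.0
print(len(predictors), "candidate models from the sweep")
```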

View File

@@ -3,5 +3,7 @@ dependencies:
- pip:
- azureml-sdk
- azureml-contrib-fairness
- fairlearn==0.4.6
- fairlearn>=0.6.2
- joblib
- liac-arff
- raiwidgets~=0.16.0

View File

@@ -4,7 +4,13 @@
"""Utilities for azureml-contrib-fairness notebooks."""
import arff
from collections import OrderedDict
from contextlib import closing
import gzip
import pandas as pd
from sklearn.datasets import fetch_openml
from sklearn.utils import Bunch
import time
@@ -15,7 +21,7 @@ def fetch_openml_with_retries(data_id, max_retries=4, retry_delay=60):
print("Download attempt {0} of {1}".format(i + 1, max_retries))
data = fetch_openml(data_id=data_id, as_frame=True)
break
except Exception as e:
except Exception as e: # noqa: B902
print("Download attempt failed with exception:")
print(e)
if i + 1 != max_retries:
@@ -26,3 +32,80 @@ def fetch_openml_with_retries(data_id, max_retries=4, retry_delay=60):
raise RuntimeError("Unable to download dataset from OpenML")
return data
_categorical_columns = [
    'workclass',
    'education',
    'marital-status',
    'occupation',
    'relationship',
    'race',
    'sex',
    'native-country'
]


def fetch_census_dataset():
    """Fetch the Adult Census Dataset.

    This uses a particular URL for the Adult Census dataset. The code
    is a simplified version of fetch_openml() in sklearn.

    The data are copied from:
    https://openml.org/data/v1/download/1595261.gz
    (as of 2021-03-31)
    """
    try:
        from urllib import urlretrieve
    except ImportError:
        from urllib.request import urlretrieve

    filename = "1595261.gz"
    data_url = "https://rainotebookscdn.blob.core.windows.net/datasets/"

    remaining_attempts = 5
    sleep_duration = 10
    while remaining_attempts > 0:
        try:
            urlretrieve(data_url + filename, filename)

            http_stream = gzip.GzipFile(filename=filename, mode='rb')

            with closing(http_stream):
                def _stream_generator(response):
                    for line in response:
                        yield line.decode('utf-8')

                stream = _stream_generator(http_stream)
                data = arff.load(stream)
        except Exception as exc:  # noqa: B902
            remaining_attempts -= 1
            print("Error downloading dataset from {} ({} attempt(s) remaining)"
                  .format(data_url, remaining_attempts))
            print(exc)
            time.sleep(sleep_duration)
            sleep_duration *= 2
            continue
        else:
            # dataset successfully downloaded
            break
    else:
        raise Exception("Could not retrieve dataset from {}.".format(data_url))

    attributes = OrderedDict(data['attributes'])
    arff_columns = list(attributes)

    raw_df = pd.DataFrame(data=data['data'], columns=arff_columns)

    target_column_name = 'class'
    target = raw_df.pop(target_column_name)
    for col_name in _categorical_columns:
        dtype = pd.api.types.CategoricalDtype(attributes[col_name])
        raw_df[col_name] = raw_df[col_name].astype(dtype, copy=False)

    result = Bunch()
    result.data = raw_df
    result.target = target

    return result
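For orientation, the helper added above is consumed in the notebooks along the lines of this minimal sketch (assuming `fairness_nb_utils.py` is importable and `liac-arff` plus `pandas` are installed):

```python
# Illustrative use of fetch_census_dataset() defined above; mirrors the notebook cells.
from fairness_nb_utils import fetch_census_dataset

data = fetch_census_dataset()
X_raw = data.data      # pandas DataFrame of features
y = data.target        # pandas Series with the 'class' label
print(X_raw.shape)
print(y.value_counts())
```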

View File

@@ -30,7 +30,7 @@
"1. [Training Models](#TrainingModels)\n",
"1. [Logging in to AzureML](#LoginAzureML)\n",
"1. [Registering the Models](#RegisterModels)\n",
"1. [Using the Fairlearn Dashboard](#LocalDashboard)\n",
"1. [Using the Fairness Dashboard](#LocalDashboard)\n",
"1. [Uploading a Fairness Dashboard to Azure](#AzureUpload)\n",
" 1. Computing Fairness Metrics\n",
" 1. Uploading to Azure\n",
@@ -48,9 +48,10 @@
"Please see the [configuration notebook](../../configuration.ipynb) for information about creating one, if required.\n",
"This notebook also requires the following packages:\n",
"* `azureml-contrib-fairness`\n",
"* `fairlearn==0.4.6` (should also work with v0.5.0)\n",
"* `fairlearn>=0.6.2` (also works for pre-v0.5.0 with slight modifications)\n",
"* `joblib`\n",
"* `shap`\n",
"* `liac-arff`\n",
"* `raiwidgets~=0.7.0`\n",
"\n",
"Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell:"
]
@@ -88,7 +89,6 @@
"source": [
"from sklearn import svm\n",
"from sklearn.compose import ColumnTransformer\n",
"from sklearn.datasets import fetch_openml\n",
"from sklearn.impute import SimpleImputer\n",
"from sklearn.linear_model import LogisticRegression\n",
"from sklearn.model_selection import train_test_split\n",
@@ -110,9 +110,9 @@
"metadata": {},
"outputs": [],
"source": [
"from fairness_nb_utils import fetch_openml_with_retries\n",
"from fairness_nb_utils import fetch_census_dataset\n",
"\n",
"data = fetch_openml_with_retries(data_id=1590)\n",
"data = fetch_census_dataset()\n",
" \n",
"# Extract the items we want\n",
"X_raw = data.data\n",
@@ -389,12 +389,11 @@
"metadata": {},
"outputs": [],
"source": [
"from fairlearn.widget import FairlearnDashboard\n",
"from raiwidgets import FairnessDashboard\n",
"\n",
"FairlearnDashboard(sensitive_features=A_test, \n",
" sensitive_feature_names=['Sex', 'Race'],\n",
" y_true=y_test.tolist(),\n",
" y_pred=ys_pred)"
"FairnessDashboard(sensitive_features=A_test, \n",
" y_true=y_test.tolist(),\n",
" y_pred=ys_pred)"
]
},
{
@@ -404,7 +403,7 @@
"<a id=\"AzureUpload\"></a>\n",
"## Uploading a Fairness Dashboard to Azure\n",
"\n",
"Uploading a fairness dashboard to Azure is a two stage process. The `FairlearnDashboard` invoked in the previous section relies on the underlying Python kernel to compute metrics on demand. This is obviously not available when the fairness dashboard is rendered in AzureML Studio. The required stages are therefore:\n",
"Uploading a fairness dashboard to Azure is a two stage process. The `FairnessDashboard` invoked in the previous section relies on the underlying Python kernel to compute metrics on demand. This is obviously not available when the fairness dashboard is rendered in AzureML Studio. The required stages are therefore:\n",
"1. Precompute all the required metrics\n",
"1. Upload to Azure\n",
"\n",

View File

@@ -3,5 +3,7 @@ dependencies:
- pip:
- azureml-sdk
- azureml-contrib-fairness
- fairlearn==0.4.6
- fairlearn>=0.6.2
- joblib
- liac-arff
- raiwidgets~=0.16.0

View File

@@ -2,9 +2,8 @@ name: azure_automl
dependencies:
# The python interpreter version.
# Currently Azure ML only supports 3.5.2 and later.
- pip==20.2.4
- pip==21.1.2
- python>=3.5.2,<3.8
- nb_conda
- boto3==1.15.18
- matplotlib==2.1.0
- numpy==1.18.5
@@ -18,12 +17,13 @@ dependencies:
- holidays==0.9.11
- pytorch::pytorch=1.4.0
- cudatoolkit=10.1.243
- tornado==6.1.0
- pip:
# Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.23.0
- azureml-widgets~=1.38.0
- pytorch-transformers==1.0.0
- spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_win32_requirements.txt [--no-deps]
- PyJWT < 2.0.0
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.38.0/validated_win32_requirements.txt [--no-deps]
- arch==4.14

View File

@@ -2,9 +2,8 @@ name: azure_automl
dependencies:
# The python interpreter version.
# Currently Azure ML only supports 3.5.2 and later.
- pip==20.2.4
- pip==21.1.2
- python>=3.5.2,<3.8
- nb_conda
- boto3==1.15.18
- matplotlib==2.1.0
- numpy==1.18.5
@@ -18,13 +17,13 @@ dependencies:
- holidays==0.9.11
- pytorch::pytorch=1.4.0
- cudatoolkit=10.1.243
- tornado==6.1.0
- pip:
# Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.23.0
- azureml-widgets~=1.38.0
- pytorch-transformers==1.0.0
- spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_linux_requirements.txt [--no-deps]
- PyJWT < 2.0.0
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.38.0/validated_linux_requirements.txt [--no-deps]
- arch==4.14

View File

@@ -2,10 +2,9 @@ name: azure_automl
dependencies:
# The python interpreter version.
# Currently Azure ML only supports 3.5.2 and later.
- pip==20.2.4
- pip==21.1.2
- nomkl
- python>=3.5.2,<3.8
- nb_conda
- boto3==1.15.18
- matplotlib==2.1.0
- numpy==1.18.5
@@ -19,12 +18,13 @@ dependencies:
- holidays==0.9.11
- pytorch::pytorch=1.4.0
- cudatoolkit=9.0
- tornado==6.1.0
- pip:
# Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.23.0
- azureml-widgets~=1.38.0
- pytorch-transformers==1.0.0
- spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_darwin_requirements.txt [--no-deps]
- PyJWT < 2.0.0
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.38.0/validated_darwin_requirements.txt [--no-deps]
- arch==4.14

View File

@@ -32,6 +32,7 @@ if [ $? -ne 0 ]; then
fi
sed -i '' 's/AZUREML-SDK-VERSION/latest/' $AUTOML_ENV_FILE
brew install libomp
if source activate $CONDA_ENV_NAME 2> /dev/null
then

View File

@@ -3,7 +3,7 @@ import platform
try:
import conda
except:
except Exception:
print('Failed to import conda.')
print('This setup is usually run from the base conda environment.')
print('You can activate the base environment using the command "conda activate base"')

View File

@@ -0,0 +1,4 @@
name: auto-ml-classification-bank-marketing-all-features
dependencies:
- pip:
- azureml-sdk

View File

@@ -1,503 +1,483 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Classification of credit card fraudulent transactions on remote compute **_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Train](#Train)\n",
"1. [Results](#Results)\n",
"1. [Test](#Test)\n",
"1. [Acknowledgements](#Acknowledgements)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"\n",
"In this example we use the associated credit card dataset to showcase how you can use AutoML for a simple classification problem. The goal is to predict if a credit card transaction is considered a fraudulent charge.\n",
"\n",
"This notebook is using remote compute to train the model.\n",
"\n",
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
"\n",
"In this notebook you will learn how to:\n",
"1. Create an experiment using an existing workspace.\n",
"2. Configure AutoML using `AutoMLConfig`.\n",
"3. Train the model using remote compute.\n",
"4. Explore the results.\n",
"5. Test the fitted model."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"from matplotlib import pyplot as plt\n",
"import pandas as pd\n",
"import os\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.dataset import Dataset\n",
"from azureml.train.automl import AutoMLConfig"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for experiment\n",
"experiment_name = 'automl-classification-ccard-remote'\n",
"\n",
"experiment=Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create or Attach existing AmlCompute\n",
"A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"cpu_cluster_name = \"cpu-cluster-1\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
" max_nodes=6)\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load Data\n",
"\n",
"Load the credit card dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
"training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
"label_column_name = 'Class'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|classification or regression|\n",
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
"|**enable_early_stopping**|Stop the run if the metric score is not showing improvement.|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**training_data**|Input dataset, containing both features and label column.|\n",
"|**label_column_name**|The name of the label column.|\n",
"\n",
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_settings = {\n",
" \"n_cross_validations\": 3,\n",
" \"primary_metric\": 'average_precision_score_weighted',\n",
" \"enable_early_stopping\": True,\n",
" \"max_concurrent_iterations\": 2, # This is a limit for testing purpose, please increase it as per cluster size\n",
" \"experiment_timeout_hours\": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible\n",
" \"verbosity\": logging.INFO,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(task = 'classification',\n",
" debug_log = 'automl_errors.log',\n",
" compute_target = compute_target,\n",
" training_data = training_data,\n",
" label_column_name = label_column_name,\n",
" **automl_settings\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run = experiment.submit(automl_config, show_output = False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# If you need to retrieve a run that already started, use the following code\n",
"#from azureml.train.automl.run import AutoMLRun\n",
"#remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Widget for Monitoring Runs\n",
"\n",
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
"\n",
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"widget-rundetails-sample"
]
},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(remote_run).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Explain model\n",
"\n",
"Automated ML models can be explained and visualized using the SDK Explainability library. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Analyze results\n",
"\n",
"### Retrieve the Best Model\n",
"\n",
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = remote_run.get_output()\n",
"fitted_model"
]
},
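For the overloads mentioned above, a hedged usage sketch (the metric name and iteration number are illustrative; `remote_run` is the AutoMLRun returned by `experiment.submit`):

```python
# Illustrative use of the get_output overloads; remote_run comes from experiment.submit.
best_run, fitted_model = remote_run.get_output()                    # overall best run
auc_run, auc_model = remote_run.get_output(metric="AUC_weighted")   # best run for a given metric
it3_run, it3_model = remote_run.get_output(iteration=3)             # a specific child iteration
```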
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Print the properties of the model\n",
"The fitted_model is a python object and you can read the different properties of the object.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the fitted model\n",
"\n",
"Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# convert the test data to dataframe\n",
"X_test_df = validation_data.drop_columns(columns=[label_column_name]).to_pandas_dataframe()\n",
"y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True).to_pandas_dataframe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# call the predict functions on the model\n",
"y_pred = fitted_model.predict(X_test_df)\n",
"y_pred"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Calculate metrics for the prediction\n",
"\n",
"Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values \n",
"from the trained model that was returned."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.metrics import confusion_matrix\n",
"import numpy as np\n",
"import itertools\n",
"\n",
"cf =confusion_matrix(y_test_df.values,y_pred)\n",
"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n",
"plt.colorbar()\n",
"plt.title('Confusion Matrix')\n",
"plt.xlabel('Predicted')\n",
"plt.ylabel('Actual')\n",
"class_labels = ['False','True']\n",
"tick_marks = np.arange(len(class_labels))\n",
"plt.xticks(tick_marks,class_labels)\n",
"plt.yticks([-0.5,0,1,1.5],['','False','True',''])\n",
"# plotting text value inside cells\n",
"thresh = cf.max() / 2.\n",
"for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])):\n",
" plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Acknowledgements"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud\n",
"\n",
"The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Universit\u00c3\u00a9 Libre de Bruxelles) on big data mining and fraud detection.\n",
"More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\n",
"\n",
"Please cite the following works:\n",
"\n",
"Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n",
"\n",
"Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon\n",
"\n",
"Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\n",
"\n",
"Dal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\n",
"\n",
"Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-A\u00c3\u00abl; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\n",
"\n",
"Carcillo, Fabrizio; Le Borgne, Yann-A\u00c3\u00abl; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing\n",
"\n",
"Bertrand Lebichot, Yann-A\u00c3\u00abl Le Borgne, Liyun He, Frederic Obl\u00c3\u00a9, Gianluca Bontempi Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection, INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\n",
"\n",
"Fabrizio Carcillo, Yann-A\u00c3\u00abl Le Borgne, Olivier Caelen, Frederic Obl\u00c3\u00a9, Gianluca Bontempi Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection Information Sciences, 2019"
]
}
],
"metadata": {
"authors": [
{
"name": "ratanase"
}
],
"category": "tutorial",
"compute": [
"AML Compute"
],
"datasets": [
"Creditcard"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"file_extension": ".py",
"framework": [
"None"
],
"friendly_name": "Classification of credit card fraudulent transactions using Automated ML",
"index_order": 5,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
},
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"tags": [
"remote_run",
"AutomatedML"
],
"task": "Classification",
"version": "3.6.7"
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Classification of credit card fraudulent transactions on remote compute **_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Train](#Train)\n",
"1. [Results](#Results)\n",
"1. [Test](#Test)\n",
"1. [Acknowledgements](#Acknowledgements)"
]
},
"nbformat": 4,
"nbformat_minor": 2
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"\n",
"In this example we use the associated credit card dataset to showcase how you can use AutoML for a simple classification problem. The goal is to predict if a credit card transaction is considered a fraudulent charge.\n",
"\n",
"This notebook is using remote compute to train the model.\n",
"\n",
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
"\n",
"In this notebook you will learn how to:\n",
"1. Create an experiment using an existing workspace.\n",
"2. Configure AutoML using `AutoMLConfig`.\n",
"3. Train the model using remote compute.\n",
"4. Explore the results.\n",
"5. Test the fitted model."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"from matplotlib import pyplot as plt\n",
"import pandas as pd\n",
"import os\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.dataset import Dataset\n",
"from azureml.train.automl import AutoMLConfig"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for experiment\n",
"experiment_name = \"automl-classification-ccard-remote\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Experiment Name\"] = experiment.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create or Attach existing AmlCompute\n",
"A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"cpu_cluster_name = \"cpu-cluster-1\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load Data\n",
"\n",
"Load the credit card dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
"training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
"label_column_name = \"Class\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|classification or regression|\n",
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
"|**enable_early_stopping**|Stop the run if the metric score is not showing improvement.|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**training_data**|Input dataset, containing both features and label column.|\n",
"|**label_column_name**|The name of the label column.|\n",
"\n",
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_settings = {\n",
" \"n_cross_validations\": 3,\n",
" \"primary_metric\": \"average_precision_score_weighted\",\n",
" \"enable_early_stopping\": True,\n",
" \"max_concurrent_iterations\": 2, # This is a limit for testing purpose, please increase it as per cluster size\n",
" \"experiment_timeout_hours\": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible\n",
" \"verbosity\": logging.INFO,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(\n",
" task=\"classification\",\n",
" debug_log=\"automl_errors.log\",\n",
" compute_target=compute_target,\n",
" training_data=training_data,\n",
" label_column_name=label_column_name,\n",
" **automl_settings,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run = experiment.submit(automl_config, show_output=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# If you need to retrieve a run that already started, use the following code\n",
"# from azureml.train.automl.run import AutoMLRun\n",
"# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Widget for Monitoring Runs\n",
"\n",
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
"\n",
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"widget-rundetails-sample"
]
},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"\n",
"RunDetails(remote_run).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Explain model\n",
"\n",
"Automated ML models can be explained and visualized using the SDK Explainability library. "
]
},
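{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell is a minimal sketch of downloading the explanation computed for the best run. It assumes the `azureml-interpret` package is installed in this environment and that the automatic model explanation for the best run has completed."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch (assumes azureml-interpret is installed and the automatic\n",
"# best-model explanation has completed): download and inspect feature importances.\n",
"from azureml.interpret import ExplanationClient\n",
"\n",
"best_run_for_explanation, _ = remote_run.get_output()\n",
"client = ExplanationClient.from_run(best_run_for_explanation)\n",
"explanation = client.download_model_explanation(raw=True)\n",
"print(explanation.get_feature_importance_dict())"
]
},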
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Analyze results\n",
"\n",
"### Retrieve the Best Model\n",
"\n",
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = remote_run.get_output()\n",
"fitted_model"
]
},
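{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell is a minimal sketch of the `get_output` overloads mentioned above; the metric name and iteration number are illustrative, so uncomment and adjust them for your run."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch (metric name and iteration number are illustrative):\n",
"# retrieve the best run for a specific logged metric, or a specific iteration.\n",
"# best_run_auc, fitted_model_auc = remote_run.get_output(metric=\"AUC_weighted\")\n",
"# third_run, third_model = remote_run.get_output(iteration=3)"
]
},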
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Print the properties of the model\n",
"The fitted_model is a python object and you can read the different properties of the object.\n"
]
},
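{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell is a minimal sketch of such inspection; it assumes the fitted model exposes the usual scikit-learn pipeline interface returned by AutoML."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch: list the steps of the fitted pipeline (assumes a scikit-learn\n",
"# style pipeline, as returned by AutoML).\n",
"for step_name, step in fitted_model.steps:\n",
"    print(step_name)\n",
"    print(step)\n",
"    print()"
]
},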
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the fitted model\n",
"\n",
"Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# convert the test data to dataframe\n",
"X_test_df = validation_data.drop_columns(\n",
" columns=[label_column_name]\n",
").to_pandas_dataframe()\n",
"y_test_df = validation_data.keep_columns(\n",
" columns=[label_column_name], validate=True\n",
").to_pandas_dataframe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# call the predict functions on the model\n",
"y_pred = fitted_model.predict(X_test_df)\n",
"y_pred"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Calculate metrics for the prediction\n",
"\n",
"Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values \n",
"from the trained model that was returned."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.metrics import confusion_matrix\n",
"import numpy as np\n",
"import itertools\n",
"\n",
"cf = confusion_matrix(y_test_df.values, y_pred)\n",
"plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
"plt.colorbar()\n",
"plt.title(\"Confusion Matrix\")\n",
"plt.xlabel(\"Predicted\")\n",
"plt.ylabel(\"Actual\")\n",
"class_labels = [\"False\", \"True\"]\n",
"tick_marks = np.arange(len(class_labels))\n",
"plt.xticks(tick_marks, class_labels)\n",
"plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"False\", \"True\", \"\"])\n",
"# plotting text value inside cells\n",
"thresh = cf.max() / 2.0\n",
"for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
" plt.text(\n",
" j,\n",
" i,\n",
" format(cf[i, j], \"d\"),\n",
" horizontalalignment=\"center\",\n",
" color=\"white\" if cf[i, j] > thresh else \"black\",\n",
" )\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Acknowledgements"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud\n",
"\n",
"The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection.\n",
"More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\n",
"\n",
"Please cite the following works:\n",
"\n",
"Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n",
"\n",
"Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon\n",
"\n",
"Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\n",
"\n",
"Dal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\n",
"\n",
"Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aël; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\n",
"\n",
"Carcillo, Fabrizio; Le Borgne, Yann-Aël; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing\n",
"\n",
"Bertrand Lebichot, Yann-Aël Le Borgne, Liyun He, Frederic Oblé, Gianluca Bontempi Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection, INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\n",
"\n",
"Fabrizio Carcillo, Yann-Aël Le Borgne, Olivier Caelen, Frederic Oblé, Gianluca Bontempi Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection Information Sciences, 2019"
]
}
],
"metadata": {
"authors": [
{
"name": "ratanase"
}
],
"category": "tutorial",
"compute": [
"AML Compute"
],
"datasets": [
"Creditcard"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"file_extension": ".py",
"framework": [
"None"
],
"friendly_name": "Classification of credit card fraudulent transactions using Automated ML",
"index_order": 5,
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
},
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"tags": [
"remote_run",
"AutomatedML"
],
"task": "Classification",
"version": "3.6.7"
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-classification-credit-card-fraud
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,4 @@
name: auto-ml-classification-text-dnn
dependencies:
- pip:
- azureml-sdk

View File

@@ -4,53 +4,65 @@ from azureml.train.estimator import Estimator
from azureml.core.run import Run
def run_inference(test_experiment, compute_target, script_folder, train_run,
train_dataset, test_dataset, target_column_name, model_name):
def run_inference(
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
target_column_name,
model_name,
):
inference_env = train_run.get_environment()
est = Estimator(source_directory=script_folder,
entry_script='infer.py',
script_params={
'--target_column_name': target_column_name,
'--model_name': model_name
},
inputs=[
train_dataset.as_named_input('train_data'),
test_dataset.as_named_input('test_data')
],
compute_target=compute_target,
environment_definition=inference_env)
est = Estimator(
source_directory=script_folder,
entry_script="infer.py",
script_params={
"--target_column_name": target_column_name,
"--model_name": model_name,
},
inputs=[test_dataset.as_named_input("test_data")],
compute_target=compute_target,
environment_definition=inference_env,
)
run = test_experiment.submit(
est, tags={
'training_run_id': train_run.id,
'run_algorithm': train_run.properties['run_algorithm'],
'valid_score': train_run.properties['score'],
'primary_metric': train_run.properties['primary_metric']
})
est,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags['run_algorithm'])
run.log("run_algorithm", run.tags["run_algorithm"])
return run
def get_result_df(remote_run):
children = list(remote_run.get_children(recursive=True))
summary_df = pd.DataFrame(index=['run_id', 'run_algorithm',
'primary_metric', 'Score'])
summary_df = pd.DataFrame(
index=["run_id", "run_algorithm", "primary_metric", "Score"]
)
goal_minimize = False
for run in children:
if('run_algorithm' in run.properties and 'score' in run.properties):
summary_df[run.id] = [run.id, run.properties['run_algorithm'],
run.properties['primary_metric'],
float(run.properties['score'])]
if('goal' in run.properties):
goal_minimize = run.properties['goal'].split('_')[-1] == 'min'
if "run_algorithm" in run.properties and "score" in run.properties:
summary_df[run.id] = [
run.id,
run.properties["run_algorithm"],
run.properties["primary_metric"],
float(run.properties["score"]),
]
if "goal" in run.properties:
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
summary_df = summary_df.T.sort_values(
'Score',
ascending=goal_minimize).drop_duplicates(['run_algorithm'])
summary_df = summary_df.set_index('run_algorithm')
"Score", ascending=goal_minimize
).drop_duplicates(["run_algorithm"])
summary_df = summary_df.set_index("run_algorithm")
return summary_df

View File

@@ -1,5 +1,6 @@
import argparse
import pandas as pd
import numpy as np
from sklearn.externals import joblib
@@ -11,19 +12,22 @@ from azureml.core.model import Model
parser = argparse.ArgumentParser()
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
'--model_name', type=str, dest='model_name',
help='Name of registered model')
"--model_name", type=str, dest="model_name", help="Name of registered model"
)
args = parser.parse_args()
target_column_name = args.target_column_name
model_name = args.model_name
print('args passed are: ')
print('Target column name: ', target_column_name)
print('Name of registered model: ', model_name)
print("args passed are: ")
print("Target column name: ", target_column_name)
print("Name of registered model: ", model_name)
model_path = Model.get_model_path(model_name)
# deserialize the model file back into a sklearn model
@@ -31,27 +35,31 @@ model = joblib.load(model_path)
run = Run.get_context()
# get input dataset by name
test_dataset = run.input_datasets['test_data']
train_dataset = run.input_datasets['train_data']
test_dataset = run.input_datasets["test_data"]
X_test_df = test_dataset.drop_columns(columns=[target_column_name]) \
.to_pandas_dataframe()
y_test_df = test_dataset.with_timestamp_columns(None) \
.keep_columns(columns=[target_column_name]) \
.to_pandas_dataframe()
y_train_df = test_dataset.with_timestamp_columns(None) \
.keep_columns(columns=[target_column_name]) \
.to_pandas_dataframe()
X_test_df = test_dataset.drop_columns(
columns=[target_column_name]
).to_pandas_dataframe()
y_test_df = (
test_dataset.with_timestamp_columns(None)
.keep_columns(columns=[target_column_name])
.to_pandas_dataframe()
)
predicted = model.predict_proba(X_test_df)
if isinstance(predicted, pd.DataFrame):
predicted = predicted.values
# Use the AutoML scoring module
class_labels = np.unique(np.concatenate((y_train_df.values, y_test_df.values)))
train_labels = model.classes_
class_labels = np.unique(
np.concatenate((y_test_df.values, np.reshape(train_labels, (-1, 1))))
)
classification_metrics = list(constants.CLASSIFICATION_SCALAR_SET)
scores = scoring.score_classification(y_test_df.values, predicted,
classification_metrics,
class_labels, train_labels)
scores = scoring.score_classification(
y_test_df.values, predicted, classification_metrics, class_labels, train_labels
)
print("scores:")
print(scores)

View File

@@ -0,0 +1,4 @@
name: auto-ml-continuous-retraining
dependencies:
- pip:
- azureml-sdk

View File

@@ -25,9 +25,11 @@ datasets = [(Dataset.Scenario.TRAINING, train_ds)]
# Register model with training dataset
model = Model.register(workspace=ws,
model_path=args.model_path,
model_name=args.model_name,
datasets=datasets)
model = Model.register(
workspace=ws,
model_path=args.model_path,
model_name=args.model_name,
datasets=datasets,
)
print("Registered version {0} of model {1}".format(model.version, model.name))

View File

@@ -16,26 +16,82 @@ if type(run) == _OfflineRun:
else:
ws = run.experiment.workspace
usaf_list = ['725724', '722149', '723090', '722159', '723910', '720279',
'725513', '725254', '726430', '720381', '723074', '726682',
'725486', '727883', '723177', '722075', '723086', '724053',
'725070', '722073', '726060', '725224', '725260', '724520',
'720305', '724020', '726510', '725126', '722523', '703333',
'722249', '722728', '725483', '722972', '724975', '742079',
'727468', '722193', '725624', '722030', '726380', '720309',
'722071', '720326', '725415', '724504', '725665', '725424',
'725066']
usaf_list = [
"725724",
"722149",
"723090",
"722159",
"723910",
"720279",
"725513",
"725254",
"726430",
"720381",
"723074",
"726682",
"725486",
"727883",
"723177",
"722075",
"723086",
"724053",
"725070",
"722073",
"726060",
"725224",
"725260",
"724520",
"720305",
"724020",
"726510",
"725126",
"722523",
"703333",
"722249",
"722728",
"725483",
"722972",
"724975",
"742079",
"727468",
"722193",
"725624",
"722030",
"726380",
"720309",
"722071",
"720326",
"725415",
"724504",
"725665",
"725424",
"725066",
]
def get_noaa_data(start_time, end_time):
columns = ['usaf', 'wban', 'datetime', 'latitude', 'longitude', 'elevation',
'windAngle', 'windSpeed', 'temperature', 'stationName', 'p_k']
columns = [
"usaf",
"wban",
"datetime",
"latitude",
"longitude",
"elevation",
"windAngle",
"windSpeed",
"temperature",
"stationName",
"p_k",
]
isd = NoaaIsdWeather(start_time, end_time, cols=columns)
noaa_df = isd.to_pandas_dataframe()
df_filtered = noaa_df[noaa_df["usaf"].isin(usaf_list)]
df_filtered.reset_index(drop=True)
print("Received {0} rows of training data between {1} and {2}".format(
df_filtered.shape[0], start_time, end_time))
print(
"Received {0} rows of training data between {1} and {2}".format(
df_filtered.shape[0], start_time, end_time
)
)
return df_filtered
@@ -49,41 +105,53 @@ print("Argument 1(ds_name): %s" % args.ds_name)
dstor = ws.get_default_datastore()
register_dataset = False
end_time = datetime.utcnow()
try:
ds = Dataset.get_by_name(ws, args.ds_name)
end_time_last_slice = ds.data_changed_time.replace(tzinfo=None)
print("Dataset {0} last updated on {1}".format(args.ds_name,
end_time_last_slice))
except Exception as e:
print("Dataset {0} last updated on {1}".format(args.ds_name, end_time_last_slice))
except Exception:
print(traceback.format_exc())
print("Dataset with name {0} not found, registering new dataset.".format(args.ds_name))
print(
"Dataset with name {0} not found, registering new dataset.".format(args.ds_name)
)
register_dataset = True
end_time_last_slice = datetime.today() - relativedelta(weeks=2)
end_time = datetime(2021, 5, 1, 0, 0)
end_time_last_slice = end_time - relativedelta(weeks=2)
end_time = datetime.utcnow()
train_df = get_noaa_data(end_time_last_slice, end_time)
if train_df.size > 0:
print("Received {0} rows of new data after {0}.".format(
train_df.shape[0], end_time_last_slice))
folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(args.ds_name, end_time.year,
end_time.month, end_time.day,
end_time.hour, end_time.minute,
end_time.second)
print(
"Received {0} rows of new data after {1}.".format(
train_df.shape[0], end_time_last_slice
)
)
folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(
args.ds_name,
end_time.year,
end_time.month,
end_time.day,
end_time.hour,
end_time.minute,
end_time.second,
)
file_path = "{0}/data.csv".format(folder_name)
# Add a new partition to the registered dataset
os.makedirs(folder_name, exist_ok=True)
train_df.to_csv(file_path, index=False)
dstor.upload_files(files=[file_path],
target_path=folder_name,
overwrite=True,
show_progress=True)
dstor.upload_files(
files=[file_path], target_path=folder_name, overwrite=True, show_progress=True
)
else:
print("No new data since {0}.".format(end_time_last_slice))
if register_dataset:
ds = Dataset.Tabular.from_delimited_files(dstor.path("{}/**/*.csv".format(
args.ds_name)), partition_format='/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv')
ds = Dataset.Tabular.from_delimited_files(
dstor.path("{}/**/*.csv".format(args.ds_name)),
partition_format="/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv",
)
ds.register(ws, name=args.ds_name)

View File

@@ -5,7 +5,7 @@ set options=%3
set PIP_NO_WARN_SCRIPT_LOCATION=0
IF "%conda_env_name%"=="" SET conda_env_name="azure_automl_experimental"
IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml"
IF "%automl_env_file%"=="" SET automl_env_file="automl_thin_client_env.yml"
IF NOT EXIST %automl_env_file% GOTO YmlMissing

View File

@@ -12,7 +12,7 @@ fi
if [ "$AUTOML_ENV_FILE" == "" ]
then
AUTOML_ENV_FILE="automl_env.yml"
AUTOML_ENV_FILE="automl_thin_client_env.yml"
fi
if [ ! -f $AUTOML_ENV_FILE ]; then

View File

@@ -12,7 +12,7 @@ fi
if [ "$AUTOML_ENV_FILE" == "" ]
then
AUTOML_ENV_FILE="automl_env.yml"
AUTOML_ENV_FILE="automl_thin_client_env_mac.yml"
fi
if [ ! -f $AUTOML_ENV_FILE ]; then

View File

@@ -4,9 +4,10 @@ dependencies:
# Currently Azure ML only supports 3.5.2 and later.
- pip<=19.3.1
- python>=3.5.2,<3.8
- nb_conda
- cython
- urllib3<1.24
- PyJWT < 2.0.0
- numpy==1.18.5
- pip:
# Required packages for AzureML execution, history, and data preparation.
@@ -14,4 +15,3 @@ dependencies:
- azureml-sdk
- azureml-widgets
- pandas
- PyJWT < 2.0.0

View File

@@ -5,9 +5,10 @@ dependencies:
- pip<=19.3.1
- nomkl
- python>=3.5.2,<3.8
- nb_conda
- cython
- urllib3<1.24
- PyJWT < 2.0.0
- numpy==1.18.5
- pip:
# Required packages for AzureML execution, history, and data preparation.
@@ -15,4 +16,3 @@ dependencies:
- azureml-sdk
- azureml-widgets
- pandas
- PyJWT < 2.0.0

View File

@@ -0,0 +1,420 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/experimental/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Classification of credit card fraudulent transactions on local managed compute **_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Train](#Train)\n",
"1. [Results](#Results)\n",
"1. [Test](#Test)\n",
"1. [Acknowledgements](#Acknowledgements)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"\n",
"In this example we use the associated credit card dataset to showcase how you can use AutoML for a simple classification problem. The goal is to predict if a credit card transaction is considered a fraudulent charge.\n",
"\n",
"This notebook is using local managed compute to train the model.\n",
"\n",
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
"\n",
"In this notebook you will learn how to:\n",
"1. Create an experiment using an existing workspace.\n",
"2. Configure AutoML using `AutoMLConfig`.\n",
"3. Train the model using local managed compute.\n",
"4. Explore the results.\n",
"5. Test the fitted model."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"import pandas as pd\n",
"\n",
"import azureml.core\n",
"from azureml.core.compute_target import LocalTarget\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.dataset import Dataset\n",
"from azureml.train.automl import AutoMLConfig"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for experiment\n",
"experiment_name = 'automl-local-managed'\n",
"\n",
"experiment=Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Determine if local docker is configured for Linux images\n",
"\n",
"Local managed runs will leverage a Linux docker container to submit the run to. Due to this, the docker needs to be configured to use Linux containers."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check if Docker is installed and Linux containers are enabled\n",
"import subprocess\n",
"from subprocess import CalledProcessError\n",
"try:\n",
" assert subprocess.run(\"docker -v\", shell=True).returncode == 0, 'Local Managed runs require docker to be installed.'\n",
" out = subprocess.check_output(\"docker system info\", shell=True).decode('ascii')\n",
" assert \"OSType: linux\" in out, 'Docker engine needs to be configured to use Linux containers.' \\\n",
" 'https://docs.docker.com/docker-for-windows/#switch-between-windows-and-linux-containers'\n",
"except CalledProcessError as ex:\n",
" raise Exception('Local Managed runs require docker to be installed.') from ex"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load Data\n",
"\n",
"Load the credit card dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
"training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
"label_column_name = 'Class'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|classification or regression|\n",
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
"|**enable_early_stopping**|Stop the run if the metric score is not showing improvement.|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**training_data**|Input dataset, containing both features and label column.|\n",
"|**label_column_name**|The name of the label column.|\n",
"|**enable_local_managed**|Enable the experimental local-managed scenario.|\n",
"\n",
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_settings = {\n",
" \"n_cross_validations\": 3,\n",
" \"primary_metric\": 'average_precision_score_weighted',\n",
" \"enable_early_stopping\": True,\n",
" \"experiment_timeout_hours\": 0.3, #for real scenarios we recommend a timeout of at least one hour \n",
" \"verbosity\": logging.INFO,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(task = 'classification',\n",
" debug_log = 'automl_errors.log',\n",
" compute_target = LocalTarget(),\n",
" enable_local_managed = True,\n",
" training_data = training_data,\n",
" label_column_name = label_column_name,\n",
" **automl_settings\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"parent_run = experiment.submit(automl_config, show_output = True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# If you need to retrieve a run that already started, use the following code\n",
"#from azureml.train.automl.run import AutoMLRun\n",
"#parent_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"parent_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Explain model\n",
"\n",
"Automated ML models can be explained and visualized using the SDK Explainability library. "
]
},
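{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell is a minimal sketch of downloading the explanation computed for the best child run. It assumes the `azureml-interpret` package is installed in this environment and that the automatic model explanation has completed for this experiment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch (assumes azureml-interpret is installed and the automatic model\n",
"# explanation has completed): download and inspect feature importances for the best child run.\n",
"from azureml.interpret import ExplanationClient\n",
"\n",
"explanation_run = parent_run.get_best_child()\n",
"client = ExplanationClient.from_run(explanation_run)\n",
"explanation = client.download_model_explanation(raw=True)\n",
"print(explanation.get_feature_importance_dict())"
]
},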
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Analyze results\n",
"\n",
"### Retrieve the Best Child Run\n",
"\n",
"Below we select the best pipeline from our iterations. The `get_best_child` method returns the best run. Overloads on `get_best_child` allow you to retrieve the best run for *any* logged metric."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run = parent_run.get_best_child()\n"
]
},
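{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell is a minimal sketch of the `get_best_child` overload mentioned above; the metric name is illustrative, so uncomment and adjust it for your run."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch (the metric name is illustrative):\n",
"# retrieve the best child run for a specific logged metric.\n",
"# best_run_auc = parent_run.get_best_child(metric=\"AUC_weighted\")"
]
},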
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the fitted model\n",
"\n",
"Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_test_df = validation_data.drop_columns(columns=[label_column_name])\n",
"y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Creating ModelProxy for submitting prediction runs to the training environment.\n",
"We will create a ModelProxy for the best child run, which will allow us to submit a run that does the prediction in the training environment. Unlike the local client, which can have different versions of some libraries, the training environment will have all the compatible libraries for the model already."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.automl.model_proxy import ModelProxy\n",
"best_model_proxy = ModelProxy(best_run)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# call the predict functions on the model proxy\n",
"y_pred = best_model_proxy.predict(X_test_df).to_pandas_dataframe()\n",
"y_pred"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Acknowledgements"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud\n",
"\n",
"\n",
"The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Universit\u00c3\u0192\u00c2\u00a9 Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\n",
"Please cite the following works: \n",
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tAndrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n",
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tDal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon\n",
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tDal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\n",
"o\tDal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\n",
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tCarcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-A\u00c3\u0192\u00c2\u00abl; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\n",
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tCarcillo, Fabrizio; Le Borgne, Yann-A\u00c3\u0192\u00c2\u00abl; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing"
]
}
],
"metadata": {
"authors": [
{
"name": "sekrupa"
}
],
"category": "tutorial",
"compute": [
"AML Compute"
],
"datasets": [
"Creditcard"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"file_extension": ".py",
"framework": [
"None"
],
"friendly_name": "Classification of credit card fraudulent transactions using Automated ML",
"index_order": 5,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
},
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"tags": [
"AutomatedML"
],
"task": "Classification",
"version": "3.6.7"
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-classification-credit-card-fraud-local-managed
dependencies:
- pip:
- azureml-sdk

View File

@@ -39,6 +39,7 @@
"source": [
"## Introduction\n",
"In this example we use an experimental feature, Model Proxy, to do a predict on the best generated model without downloading the model locally. The prediction will happen on same compute and environment that was used to train the model. This feature is currently in the experimental state, which means that the API is prone to changing, please make sure to run on the latest version of this notebook if you face any issues.\n",
"This notebook will also leverage MLFlow for saving models, allowing for more portability of the resulting models. See https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-mlflow for more details around MLFlow is AzureML.\n",
"\n",
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
"\n",
@@ -90,7 +91,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -142,7 +143,7 @@
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
" max_nodes=4)\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
@@ -194,7 +195,6 @@
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**training_data**|(sparse) array-like, shape = [n_samples, n_features]|\n",
"|**label_column_name**|(sparse) array-like, shape = [n_samples, ], targets values.|\n",
"|**scenario**|We need to set this parameter to 'Latest' to enable some experimental features. This parameter should not be set outside of this experimental notebook.|\n",
"\n",
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
]
@@ -213,17 +213,17 @@
" \"n_cross_validations\": 3,\n",
" \"primary_metric\": 'r2_score',\n",
" \"enable_early_stopping\": True, \n",
" \"experiment_timeout_hours\": 0.3, #for real scenarios we reccommend a timeout of at least one hour \n",
" \"experiment_timeout_hours\": 0.3, #for real scenarios we recommend a timeout of at least one hour \n",
" \"max_concurrent_iterations\": 4,\n",
" \"max_cores_per_iteration\": -1,\n",
" \"verbosity\": logging.INFO,\n",
" \"save_mlflow\": True,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(task = 'regression',\n",
" compute_target = compute_target,\n",
" training_data = train_data,\n",
" label_column_name = label,\n",
" scenario='Latest',\n",
" **automl_settings\n",
" )"
]

View File

@@ -0,0 +1,4 @@
name: auto-ml-regression-model-proxy
dependencies:
- pip:
- azureml-sdk

Binary file not shown.

After

Width:  |  Height:  |  Size: 22 KiB

View File

@@ -0,0 +1,167 @@
from typing import Any, Dict, Optional, List
import argparse
import json
import os
import re
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from azureml.automl.core.shared import constants
from azureml.automl.core.shared.types import GrainType
from azureml.automl.runtime.shared.score import scoring
GRAIN = "time_series_id"
BACKTEST_ITER = "backtest_iteration"
ACTUALS = "actual_level"
PREDICTIONS = "predicted_level"
ALL_GRAINS = "all_sets"
FORECASTS_FILE = "forecast.csv"
SCORES_FILE = "scores.csv"
PLOTS_FILE = "plots_fcst_vs_actual.pdf"
RE_INVALID_SYMBOLS = re.compile("[: ]")
def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
"""
Compute metrics for one data frame.
:param df: The data frame which contains actual_level and predicted_level columns.
:return: The data frame with two columns - metric_name and metric.
"""
scores = scoring.score_regression(
y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
)
metrics_df = pd.DataFrame(list(scores.items()), columns=["metric_name", "metric"])
metrics_df.sort_values(["metric_name"], inplace=True)
metrics_df.reset_index(drop=True, inplace=True)
return metrics_df
def _format_grain_name(grain: GrainType) -> str:
"""
Convert grain name to string.
:param grain: the grain name.
:return: the string representation of the given grain.
"""
if not isinstance(grain, tuple) and not isinstance(grain, list):
return str(grain)
grain = list(map(str, grain))
return "|".join(grain)
def compute_all_metrics(
fcst_df: pd.DataFrame,
ts_id_colnames: List[str],
metric_names: Optional[List[str]] = None,
):
"""
Calculate metrics per grain.
:param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'
:param metric_names: (optional) the list of metric names to return
:param ts_id_colnames: (optional) list of grain column names
:return: a data frame with metrics per time series ID, plus the overall metrics across all series
"""
if not metric_names:
metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)
if ts_id_colnames is None:
ts_id_colnames = []
metrics_list = []
if ts_id_colnames:
for grain, df in fcst_df.groupby(ts_id_colnames):
one_grain_metrics_df = _compute_metrics(df, metric_names)
one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
metrics_list.append(one_grain_metrics_df)
# overall metrics
one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
one_grain_metrics_df[GRAIN] = ALL_GRAINS
metrics_list.append(one_grain_metrics_df)
# collect into a data frame
return pd.concat(metrics_list)
def _draw_one_plot(
df: pd.DataFrame,
time_column_name: str,
grain_column_names: List[str],
pdf: PdfPages,
) -> None:
"""
Draw the single plot.
:param df: The data frame with the data to build plot.
:param time_column_name: The name of a time column.
:param grain_column_names: The name of grain columns.
:param pdf: The pdf backend used to render the plot.
"""
fig, _ = plt.subplots(figsize=(20, 10))
df = df.set_index(time_column_name)
plt.plot(df[[ACTUALS, PREDICTIONS]])
plt.xticks(rotation=45)
iteration = df[BACKTEST_ITER].iloc[0]
if grain_column_names:
grain_name = [df[grain].iloc[0] for grain in grain_column_names]
plt.title(f"Time series ID: {_format_grain_name(grain_name)} {iteration}")
plt.legend(["actual", "forecast"])
plt.close(fig)
pdf.savefig(fig)
def calculate_scores_and_build_plots(
input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
):
os.makedirs(output_dir, exist_ok=True)
grains = automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
if grains is None:
grains = []
if isinstance(grains, str):
grains = [grains]
while BACKTEST_ITER in grains:
grains.remove(BACKTEST_ITER)
dfs = []
for fle in os.listdir(input_dir):
file_path = os.path.join(input_dir, fle)
if os.path.isfile(file_path) and file_path.endswith(".csv"):
df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
for _, iteration in df_iter.groupby(BACKTEST_ITER):
dfs.append(iteration)
forecast_df = pd.concat(dfs, sort=False, ignore_index=True)
# To make sure plots are in order, sort the predictions by grain and iteration.
ts_index = grains + [BACKTEST_ITER]
forecast_df.sort_values(by=ts_index, inplace=True)
pdf = PdfPages(os.path.join(output_dir, PLOTS_FILE))
for _, one_forecast in forecast_df.groupby(ts_index):
_draw_one_plot(one_forecast, time_column_name, grains, pdf)
pdf.close()
forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)
if __name__ == "__main__":
args = {"forecasts": "--forecasts", "scores_out": "--output-dir"}
parser = argparse.ArgumentParser("Parsing input arguments.")
for argname, arg in args.items():
parser.add_argument(arg, dest=argname, required=True)
parsed_args, _ = parser.parse_known_args()
input_dir = parsed_args.forecasts
output_dir = parsed_args.scores_out
with open(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
)
) as json_file:
automl_settings = json.load(json_file)
calculate_scores_and_build_plots(input_dir, output_dir, automl_settings)

View File

@@ -0,0 +1,725 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Many Models with Backtesting - Automated ML\n",
"**_Backtest many models time series forecasts with Automated Machine Learning_**\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For this notebook we are using a synthetic dataset to demonstrate the back testing in many model scenario. This allows us to check historical performance of AutoML on a historical data. To do that we step back on the backtesting period by the data set several times and split the data to train and test sets. Then these data sets are used for training and evaluation of model.<br>\n",
"\n",
"Thus, it is a quick way of evaluating AutoML as if it was in production. Here, we do not test historical performance of a particular model, for this see the [notebook](../forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb). Instead, the best model for every backtest iteration can be different since AutoML chooses the best model for a given training set.\n",
"![Backtesting](Backtesting.png)\n",
"\n",
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1.0 Set up workspace, datastore, experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003526897
}
},
"outputs": [],
"source": [
"import os\n",
"\n",
"import azureml.core\n",
"from azureml.core import Workspace, Datastore\n",
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"from pandas.tseries.frequencies import to_offset\n",
"\n",
"# Set up your workspace\n",
"ws = Workspace.from_config()\n",
"ws.get_details()\n",
"\n",
"# Set up your datastores\n",
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook is compatible with Azure ML SDK version 1.35.1 or later."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose an experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003540729
}
},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, \"automl-many-models-backtest\")\n",
"\n",
"print(\"Experiment name: \" + experiment.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2.0 Data\n",
"\n",
"#### 2.1 Data generation\n",
"For this notebook we will generate the artificial data set with two [time series IDs](https://docs.microsoft.com/en-us/python/api/azureml-automl-core/azureml.automl.core.forecasting_parameters.forecastingparameters?view=azure-ml-py). Then we will generate backtest folds and will upload it to the default BLOB storage and create a [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# simulate data: 2 grains - 700\n",
"TIME_COLNAME = \"date\"\n",
"TARGET_COLNAME = \"value\"\n",
"TIME_SERIES_ID_COLNAME = \"ts_id\"\n",
"\n",
"sample_size = 700\n",
"# Set the random seed for reproducibility of results.\n",
"np.random.seed(20)\n",
"X1 = pd.DataFrame(\n",
" {\n",
" TIME_COLNAME: pd.date_range(start=\"2018-01-01\", periods=sample_size),\n",
" TARGET_COLNAME: np.random.normal(loc=100, scale=20, size=sample_size),\n",
" TIME_SERIES_ID_COLNAME: \"ts_A\",\n",
" }\n",
")\n",
"X2 = pd.DataFrame(\n",
" {\n",
" TIME_COLNAME: pd.date_range(start=\"2018-01-01\", periods=sample_size),\n",
" TARGET_COLNAME: np.random.normal(loc=100, scale=20, size=sample_size),\n",
" TIME_SERIES_ID_COLNAME: \"ts_B\",\n",
" }\n",
")\n",
"\n",
"X = pd.concat([X1, X2], ignore_index=True, sort=False)\n",
"print(\"Simulated dataset contains {} rows \\n\".format(X.shape[0]))\n",
"X.head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we will generate 8 backtesting folds with backtesting period of 7 days and with the same forecasting horizon. We will add the column \"backtest_iteration\", which will identify the backtesting period by the last training date."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"offset_type = \"7D\"\n",
"NUMBER_OF_BACKTESTS = 8 # number of train/test sets to generate\n",
"\n",
"dfs_train = []\n",
"dfs_test = []\n",
"for ts_id, df_one in X.groupby(TIME_SERIES_ID_COLNAME):\n",
"\n",
" data_end = df_one[TIME_COLNAME].max()\n",
"\n",
" for i in range(NUMBER_OF_BACKTESTS):\n",
" train_cutoff_date = data_end - to_offset(offset_type)\n",
" df_one = df_one.copy()\n",
" df_one[\"backtest_iteration\"] = \"iteration_\" + str(train_cutoff_date)\n",
" train = df_one[df_one[TIME_COLNAME] <= train_cutoff_date]\n",
" test = df_one[\n",
" (df_one[TIME_COLNAME] > train_cutoff_date)\n",
" & (df_one[TIME_COLNAME] <= data_end)\n",
" ]\n",
" data_end = train[TIME_COLNAME].max()\n",
" dfs_train.append(train)\n",
" dfs_test.append(test)\n",
"\n",
"X_train = pd.concat(dfs_train, sort=False, ignore_index=True)\n",
"X_test = pd.concat(dfs_test, sort=False, ignore_index=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 2.2 Create the Tabular Data Set.\n",
"\n",
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
"\n",
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.\n",
"\n",
"In this next step, we will upload the data and create a TabularDataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"ds = ws.get_default_datastore()\n",
"# Upload saved data to the default data store.\n",
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n",
" X_train, target=(ds, \"data_mm\"), name=\"data_train\"\n",
")\n",
"test_data = TabularDatasetFactory.register_pandas_dataframe(\n",
" X_test, target=(ds, \"data_mm\"), name=\"data_test\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3.0 Build the training pipeline\n",
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose a compute target\n",
"\n",
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
"\n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007037308
}
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"\n",
"# Name your cluster\n",
"compute_name = \"backtest-mm\"\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print(\"Found compute target: \" + compute_name)\n",
"else:\n",
" print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" # Create the compute target\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
" # If no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
" )\n",
"\n",
" # For a more detailed view of current cluster status, use the 'status' property\n",
" print(compute_target.status.serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up training parameters\n",
"\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition. Please note, that in this case we are setting grain_column_names to be the time series ID column plus iteration, because we want to train a separate model for each time series and iteration.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>normalized_root_mean_squared_error</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **max_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **grain_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007061544
}
},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsTrainParameters,\n",
")\n",
"\n",
"partition_column_names = [TIME_SERIES_ID_COLNAME, \"backtest_iteration\"]\n",
"automl_settings = {\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
" \"iterations\": 15,\n",
" \"experiment_timeout_hours\": 0.25, # This also needs to be changed based on the dataset. For larger data set this number needs to be bigger.\n",
" \"label_column_name\": TARGET_COLNAME,\n",
" \"n_cross_validations\": 3,\n",
" \"time_column_name\": TIME_COLNAME,\n",
" \"max_horizon\": 6,\n",
" \"grain_column_names\": partition_column_names,\n",
" \"track_child_runs\": False,\n",
"}\n",
"\n",
"mm_paramters = ManyModelsTrainParameters(\n",
" automl_settings=automl_settings, partition_column_names=partition_column_names\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up many models pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for training. |\n",
"| **train_data** | The file dataset to be used as input to the training run. |\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. |\n",
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
"\n",
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
]
},
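{
"cell_type": "markdown",
"metadata": {},
"source": [
"The optional sketch below only illustrates the 2:1 cores-to-processes guidance from the table above; the core count is an assumption for the STANDARD_DS12_V2 VM (4 vCPUs), and the values used in the next cell (node_count=2, process_count_per_node=2) already follow this rule."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sizing sketch only; not required by the pipeline.\n",
"# Assumed core count for STANDARD_DS12_V2 (4 vCPUs).\n",
"vm_cores = 4\n",
"suggested_process_count_per_node = max(1, vm_cores // 2)  # 2:1 cores-to-processes ratio\n",
"suggested_node_count = 2  # increase to shorten wall-clock training time\n",
"print(f\"process_count_per_node={suggested_process_count_per_node}, node_count={suggested_node_count}\")"
]
},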
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"\n",
"\n",
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
" experiment=experiment,\n",
" train_data=train_data,\n",
" compute_target=compute_target,\n",
" node_count=2,\n",
" process_count_per_node=2,\n",
" run_invocation_timeout=920,\n",
" train_pipeline_parameters=mm_paramters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit the pipeline to run\n",
"Next we submit our pipeline to run. The whole training pipeline takes about 20 minutes using a STANDARD_DS12_V2 VM with our current ParallelRunConfig setting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run = experiment.submit(training_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check the run status, if training_run is in completed state, continue to next section. Otherwise, check the portal for failures."
]
},
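{
"cell_type": "markdown",
"metadata": {},
"source": [
"The optional cell below is a minimal status check; it assumes the `training_run` object from the submission cell above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Quick status check for the training pipeline run submitted above.\n",
"print(\"Training pipeline run status:\", training_run.get_status())\n",
"# If the run did not complete successfully, the portal link helps locate the failed step.\n",
"print(\"Portal URL:\", training_run.get_portal_url())"
]
},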
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 4.0 Backtesting\n",
"Now that we selected the best AutoML model for each backtest fold, we will use these models to generate the forecasts and compare with the actuals."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up output dataset for inference data\n",
"Output of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data import OutputFileDatasetConfig\n",
"\n",
"output_inference_data_ds = OutputFileDatasetConfig(\n",
" name=\"many_models_inference_output\",\n",
" destination=(dstore, \"backtesting/inference_data/\"),\n",
").register_on_complete(name=\"backtesting_data_ds\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For many models we need to provide the ManyModelsInferenceParameters object.\n",
"\n",
"#### ManyModelsInferenceParameters arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **partition_column_names** | List of column names that identifies groups. |\n",
"| **target_column_name** | \\[Optional\\] Column name only if the inference dataset has the target. |\n",
"| **time_column_name** | Column name only if it is timeseries. |\n",
"| **many_models_run_id** | \\[Optional\\] Many models pipeline run id where models were trained. |\n",
"\n",
"#### get_many_models_batch_inference_steps arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for inference run. |\n",
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
"| **compute_target** | The compute target that runs the inference pipeline.|\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
"| **process_count_per_node** | The number of processes per node.\n",
"| **train_run_id** | \\[Optional\\] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. |\n",
"| **train_experiment_name** | \\[Optional\\] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
"| **process_count_per_node** | \\[Optional\\] The number of processes per node, by default it's 4. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsInferenceParameters,\n",
")\n",
"\n",
"mm_parameters = ManyModelsInferenceParameters(\n",
" partition_column_names=partition_column_names,\n",
" time_column_name=TIME_COLNAME,\n",
" target_column_name=TARGET_COLNAME,\n",
")\n",
"\n",
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
" experiment=experiment,\n",
" inference_data=test_data,\n",
" node_count=2,\n",
" process_count_per_node=2,\n",
" compute_target=compute_target,\n",
" run_invocation_timeout=300,\n",
" output_datastore=output_inference_data_ds,\n",
" train_run_id=training_run.id,\n",
" train_experiment_name=training_run.experiment.name,\n",
" inference_pipeline_parameters=mm_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"inference_pipeline = Pipeline(ws, steps=inference_steps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline)\n",
"inference_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5.0 Retrieve results and calculate metrics\n",
"\n",
"The pipeline returns one file with the predictions for each times series ID and outputs the result to the forecasting_output Blob container. The details of the blob container is listed in 'forecasting_output.txt' under Outputs+logs. \n",
"\n",
"The next code snippet does the following:\n",
"1. Downloads the contents of the output folder that is passed in the parallel run step \n",
"2. Reads the parallel_run_step.txt file that has the predictions as pandas dataframe \n",
"3. Saves the table in csv format and \n",
"4. Displays the top 10 rows of the predictions"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
"\n",
"forecasting_results_name = \"forecasting_results\"\n",
"forecasting_output_name = \"many_models_inference_output\"\n",
"forecast_file = get_output_from_mm_pipeline(\n",
" inference_run, forecasting_results_name, forecasting_output_name\n",
")\n",
"df = pd.read_csv(forecast_file, delimiter=\" \", header=None, parse_dates=[0])\n",
"df.columns = list(X_train.columns) + [\"predicted_level\"]\n",
"print(\n",
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
")\n",
"# Save the scv file with header to read it in the next step.\n",
"df.rename(columns={TARGET_COLNAME: \"actual_level\"}, inplace=True)\n",
"df.to_csv(os.path.join(forecasting_results_name, \"forecast.csv\"), index=False)\n",
"df.head(10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View metrics\n",
"We will read in the obtained results and run the helper script, which will generate metrics and create the plots of predicted versus actual values."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from assets.score import calculate_scores_and_build_plots\n",
"\n",
"backtesting_results = \"backtesting_mm_results\"\n",
"os.makedirs(backtesting_results, exist_ok=True)\n",
"calculate_scores_and_build_plots(\n",
" forecasting_results_name, backtesting_results, automl_settings\n",
")\n",
"pd.DataFrame({\"File\": os.listdir(backtesting_results)})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The directory contains a set of files with results:\n",
"- forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix\n",
"- scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series ids, which are marked as \"all_sets\"\n",
"- plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and, eash time series is saved as separate plot.\n",
"\n",
"For demonstration purposes we will display the table of metrics for one of the time series with ID \"ts0\". We will create the utility function, which will build the table with metrics."
]
},
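{
"cell_type": "markdown",
"metadata": {},
"source": [
"This optional sanity check is a minimal sketch; it assumes the `backtesting_results` folder created above and the `pandas` and `os` imports from the setup section."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: peek at the combined forecast file and list the backtest iterations it contains.\n",
"forecast_all = pd.read_csv(os.path.join(backtesting_results, \"forecast.csv\"))\n",
"print(\"Backtest iterations:\", sorted(forecast_all[\"backtest_iteration\"].unique()))\n",
"forecast_all.head()"
]
},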
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_metrics_for_ts(all_metrics, ts):\n",
" \"\"\"\n",
" Get the metrics for the time series with ID ts and return it as pandas data frame.\n",
"\n",
" :param all_metrics: The table with all the metrics.\n",
" :param ts: The ID of a time series of interest.\n",
" :return: The pandas DataFrame with metrics for one time series.\n",
" \"\"\"\n",
" results_df = None\n",
" for ts_id, one_series in all_metrics.groupby(\"time_series_id\"):\n",
" if not ts_id.startswith(ts):\n",
" continue\n",
" iteration = ts_id.split(\"|\")[-1]\n",
" df = one_series[[\"metric_name\", \"metric\"]]\n",
" df.rename({\"metric\": iteration}, axis=1, inplace=True)\n",
" df.set_index(\"metric_name\", inplace=True)\n",
" if results_df is None:\n",
" results_df = df\n",
" else:\n",
" results_df = results_df.merge(\n",
" df, how=\"inner\", left_index=True, right_index=True\n",
" )\n",
" results_df.sort_index(axis=1, inplace=True)\n",
" return results_df\n",
"\n",
"\n",
"metrics_df = pd.read_csv(os.path.join(backtesting_results, \"scores.csv\"))\n",
"ts = \"ts_A\"\n",
"get_metrics_for_ts(metrics_df, ts)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Forecast vs actuals plots."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import IFrame\n",
"\n",
"IFrame(\"./backtesting_mm_results/plots_fcst_vs_actual.pdf\", width=800, height=300)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"categories": [
"how-to-use-azureml",
"automated-machine-learning"
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-backtest-many-models
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,3 @@
dependencies:
- pip:
- azureml-contrib-automl-pipeline-steps

Binary image file added (22 KiB); not shown.

View File

@@ -0,0 +1,45 @@
import argparse
import os
import pandas as pd
import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru
from azureml.core import Run
from azureml.core.dataset import Dataset
# Parse the arguments.
args = {
"step_size": "--step-size",
"step_number": "--step-number",
"time_column_name": "--time-column-name",
"time_series_id_column_names": "--time-series-id-column-names",
"out_dir": "--output-dir",
}
parser = argparse.ArgumentParser("Parsing input arguments.")
for argname, arg in args.items():
parser.add_argument(arg, dest=argname, required=True)
parsed_args, _ = parser.parse_known_args()
step_number = int(parsed_args.step_number)
step_size = int(parsed_args.step_size)
# Create the working directory to store the temporary csv files.
working_dir = parsed_args.out_dir
os.makedirs(working_dir, exist_ok=True)
# Set input and output
script_run = Run.get_context()
input_dataset = script_run.input_datasets["training_data"]
X_train = input_dataset.to_pandas_dataframe()
# Split the data.
for i in range(step_number):
file_name = os.path.join(working_dir, "backtest_{}.csv".format(i))
if parsed_args.time_series_id_column_names:
dfs = []
for _, one_series in X_train.groupby([parsed_args.time_series_id_column_names]):
one_series = one_series.sort_values(
by=[parsed_args.time_column_name], inplace=False
)
dfs.append(one_series.iloc[: len(one_series) - step_size * i])
pd.concat(dfs, sort=False, ignore_index=True).to_csv(file_name, index=False)
else:
X_train.sort_values(by=[parsed_args.time_column_name], inplace=True)
X_train.iloc[: len(X_train) - step_size * i].to_csv(file_name, index=False)

View File

@@ -0,0 +1,173 @@
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""The batch script needed for back testing of models using PRS."""
import argparse
import json
import logging
import os
import pickle
import re
import pandas as pd
from azureml.core.experiment import Experiment
from azureml.core.model import Model
from azureml.core.run import Run
from azureml.automl.core.shared import constants
from azureml.automl.runtime.shared.score import scoring
from azureml.train.automl import AutoMLConfig
RE_INVALID_SYMBOLS = re.compile(r"[:\s]")
model_name = None
target_column_name = None
current_step_run = None
output_dir = None
logger = logging.getLogger(__name__)
def _get_automl_settings():
with open(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
)
) as json_file:
return json.load(json_file)
def init():
global model_name
global target_column_name
global output_dir
global automl_settings
global model_uid
logger.info("Initialization of the run.")
parser = argparse.ArgumentParser("Parsing input arguments.")
parser.add_argument("--output-dir", dest="out", required=True)
parser.add_argument("--model-name", dest="model", default=None)
parser.add_argument("--model-uid", dest="model_uid", default=None)
parsed_args, _ = parser.parse_known_args()
model_name = parsed_args.model
automl_settings = _get_automl_settings()
target_column_name = automl_settings.get("label_column_name")
output_dir = parsed_args.out
model_uid = parsed_args.model_uid
os.makedirs(output_dir, exist_ok=True)
os.environ["AUTOML_IGNORE_PACKAGE_VERSION_INCOMPATIBILITIES".lower()] = "True"
def get_run():
global current_step_run
if current_step_run is None:
current_step_run = Run.get_context()
return current_step_run
def run_backtest(data_input_name: str, file_name: str, experiment: Experiment):
"""Re-train the model and return metrics."""
data_input = pd.read_csv(
data_input_name,
parse_dates=[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]],
)
print(data_input.head())
if not automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES):
# There are no grains.
data_input.sort_values(
[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]], inplace=True
)
X_train = data_input.iloc[: -automl_settings["max_horizon"]]
y_train = X_train.pop(target_column_name).values
X_test = data_input.iloc[-automl_settings["max_horizon"] :]
y_test = X_test.pop(target_column_name).values
else:
# The data contain grains.
dfs_train = []
dfs_test = []
for _, one_series in data_input.groupby(
automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
):
one_series.sort_values(
[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]], inplace=True
)
dfs_train.append(one_series.iloc[: -automl_settings["max_horizon"]])
dfs_test.append(one_series.iloc[-automl_settings["max_horizon"] :])
X_train = pd.concat(dfs_train, sort=False, ignore_index=True)
y_train = X_train.pop(target_column_name).values
X_test = pd.concat(dfs_test, sort=False, ignore_index=True)
y_test = X_test.pop(target_column_name).values
last_training_date = str(
X_train[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]].max()
)
if file_name:
# If a file name is provided, we will load the model and retrain it on the backtest data.
with open(file_name, "rb") as fp:
fitted_model = pickle.load(fp)
fitted_model.fit(X_train, y_train)
else:
# We will run the experiment and select the best model.
X_train[target_column_name] = y_train
automl_config = AutoMLConfig(training_data=X_train, **automl_settings)
automl_run = current_step_run.submit_child(automl_config, show_output=True)
best_run, fitted_model = automl_run.get_output()
# As we have generated models, we need to register them for future use.
description = "Backtest model example"
tags = {"last_training_date": last_training_date, "experiment": experiment.name}
if model_uid:
tags["model_uid"] = model_uid
automl_run.register_model(
model_name=best_run.properties["model_name"],
description=description,
tags=tags,
)
print(f"The model {best_run.properties['model_name']} was registered.")
_, x_pred = fitted_model.forecast(X_test)
x_pred.reset_index(inplace=True, drop=False)
columns = [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]]
if automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES):
# We know that fitted_model.grain_column_names is a list.
columns.extend(fitted_model.grain_column_names)
columns.append(constants.TimeSeriesInternal.DUMMY_TARGET_COLUMN)
# Remove featurized columns.
x_pred = x_pred[columns]
x_pred.rename(
{constants.TimeSeriesInternal.DUMMY_TARGET_COLUMN: "predicted_level"},
axis=1,
inplace=True,
)
x_pred["actual_level"] = y_test
x_pred["backtest_iteration"] = f"iteration_{last_training_date}"
date_safe = RE_INVALID_SYMBOLS.sub("_", last_training_date)
x_pred.to_csv(os.path.join(output_dir, f"iteration_{date_safe}.csv"), index=False)
return x_pred
def run(input_files):
"""Run the script"""
logger.info("Running mini batch.")
ws = get_run().experiment.workspace
file_name = None
if model_name:
models = Model.list(ws, name=model_name)
cloud_model = None
if models:
for one_mod in models:
if cloud_model is None or one_mod.version > cloud_model.version:
logger.info(
"Using existing model from the workspace. Model version: {}".format(
one_mod.version
)
)
cloud_model = one_mod
file_name = cloud_model.download(exist_ok=True)
forecasts = []
logger.info("Running backtest.")
for input_file in input_files:
forecasts.append(run_backtest(input_file, file_name, get_run().experiment))
return pd.concat(forecasts)

View File

@@ -0,0 +1,167 @@
from typing import Any, Dict, Optional, List
import argparse
import json
import os
import re
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from azureml.automl.core.shared import constants
from azureml.automl.core.shared.types import GrainType
from azureml.automl.runtime.shared.score import scoring
GRAIN = "time_series_id"
BACKTEST_ITER = "backtest_iteration"
ACTUALS = "actual_level"
PREDICTIONS = "predicted_level"
ALL_GRAINS = "all_sets"
FORECASTS_FILE = "forecast.csv"
SCORES_FILE = "scores.csv"
PLOTS_FILE = "plots_fcst_vs_actual.pdf"
RE_INVALID_SYMBOLS = re.compile("[: ]")
def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
"""
Compute metrics for one data frame.
:param df: The data frame which contains actual_level and predicted_level columns.
:return: The data frame with two columns - metric_name and metric.
"""
scores = scoring.score_regression(
y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
)
metrics_df = pd.DataFrame(list(scores.items()), columns=["metric_name", "metric"])
metrics_df.sort_values(["metric_name"], inplace=True)
metrics_df.reset_index(drop=True, inplace=True)
return metrics_df
def _format_grain_name(grain: GrainType) -> str:
"""
Convert grain name to string.
:param grain: the grain name.
:return: the string representation of the given grain.
"""
if not isinstance(grain, tuple) and not isinstance(grain, list):
return str(grain)
grain = list(map(str, grain))
return "|".join(grain)
def compute_all_metrics(
fcst_df: pd.DataFrame,
ts_id_colnames: List[str],
metric_names: Optional[List[str]] = None,
):
"""
Calculate metrics per grain.
:param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'
:param metric_names: (optional) the list of metric names to return
:param ts_id_colnames: (optional) list of grain column names
:return: the data frame with metrics for each time series ID and for all series combined ("all_sets")
"""
if not metric_names:
metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)
if ts_id_colnames is None:
ts_id_colnames = []
metrics_list = []
if ts_id_colnames:
for grain, df in fcst_df.groupby(ts_id_colnames):
one_grain_metrics_df = _compute_metrics(df, metric_names)
one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
metrics_list.append(one_grain_metrics_df)
# overall metrics
one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
one_grain_metrics_df[GRAIN] = ALL_GRAINS
metrics_list.append(one_grain_metrics_df)
# collect into a data frame
return pd.concat(metrics_list)
def _draw_one_plot(
df: pd.DataFrame,
time_column_name: str,
grain_column_names: List[str],
pdf: PdfPages,
) -> None:
"""
Draw the single plot.
:param df: The data frame with the data to build plot.
:param time_column_name: The name of a time column.
:param grain_column_names: The name of grain columns.
:param pdf: The pdf backend used to render the plot.
"""
fig, _ = plt.subplots(figsize=(20, 10))
df = df.set_index(time_column_name)
plt.plot(df[[ACTUALS, PREDICTIONS]])
plt.xticks(rotation=45)
iteration = df[BACKTEST_ITER].iloc[0]
if grain_column_names:
grain_name = [df[grain].iloc[0] for grain in grain_column_names]
plt.title(f"Time series ID: {_format_grain_name(grain_name)} {iteration}")
plt.legend(["actual", "forecast"])
plt.close(fig)
pdf.savefig(fig)
def calculate_scores_and_build_plots(
input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
):
os.makedirs(output_dir, exist_ok=True)
grains = automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
if grains is None:
grains = []
if isinstance(grains, str):
grains = [grains]
while BACKTEST_ITER in grains:
grains.remove(BACKTEST_ITER)
dfs = []
for fle in os.listdir(input_dir):
file_path = os.path.join(input_dir, fle)
if os.path.isfile(file_path) and file_path.endswith(".csv"):
df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
for _, iteration in df_iter.groupby(BACKTEST_ITER):
dfs.append(iteration)
forecast_df = pd.concat(dfs, sort=False, ignore_index=True)
# To make sure plots are in order, sort the predictions by grain and iteration.
ts_index = grains + [BACKTEST_ITER]
forecast_df.sort_values(by=ts_index, inplace=True)
pdf = PdfPages(os.path.join(output_dir, PLOTS_FILE))
for _, one_forecast in forecast_df.groupby(ts_index):
_draw_one_plot(one_forecast, time_column_name, grains, pdf)
pdf.close()
forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)
if __name__ == "__main__":
args = {"forecasts": "--forecasts", "scores_out": "--output-dir"}
parser = argparse.ArgumentParser("Parsing input arguments.")
for argname, arg in args.items():
parser.add_argument(arg, dest=argname, required=True)
parsed_args, _ = parser.parse_known_args()
input_dir = parsed_args.forecasts
output_dir = parsed_args.scores_out
with open(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
)
) as json_file:
automl_settings = json.load(json_file)
calculate_scores_and_build_plots(input_dir, output_dir, automl_settings)

View File

@@ -0,0 +1,719 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License.\n",
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl-forecasting-function.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated MachineLearning\n",
"_**The model backtesting**_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"2. [Setup](#Setup)\n",
"3. [Data](#Data)\n",
"4. [Prepare remote compute and data.](#prepare_remote)\n",
"5. [Create the configuration for AutoML backtesting](#train)\n",
"6. [Backtest AutoML](#backtest_automl)\n",
"7. [View metrics](#Metrics)\n",
"8. [Backtest the best model](#backtest_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"Model backtesting is used to evaluate its performance on historical data. To do that we step back on the backtesting period by the data set several times and split the data to train and test sets. Then these data sets are used for training and evaluation of model.<br>\n",
"This notebook is intended to demonstrate backtesting on a single model, this is the best solution for small data sets with a few or one time series in it. For scenarios where we would like to choose the best AutoML model for every backtest iteration, please see [AutoML Forecasting Backtest Many Models Example](../forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb) notebook.\n",
"![Backtesting](Backtesting.png)\n",
"This notebook demonstrates two ways of backtesting:\n",
"- AutoML backtesting: we will train separate AutoML models for historical data\n",
"- Model backtesting: from the first run we will select the best model trained on the most recent data, retrain it on the past data and evaluate."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import numpy as np\n",
"import pandas as pd\n",
"import shutil\n",
"\n",
"import azureml.core\n",
"from azureml.core import Experiment, Model, Workspace"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook is compatible with Azure ML SDK version 1.35.1 or later."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created a <b>Workspace</b>."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"output = {}\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"SKU\"] = ws.sku\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data\n",
"For the demonstration purposes we will simulate one year of daily data. To do this we need to specify the following parameters: time column name, time series ID column names and label column name. Our intention is to forecast for two weeks ahead."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TIME_COLUMN_NAME = \"date\"\n",
"TIME_SERIES_ID_COLUMN_NAMES = \"time_series_id\"\n",
"LABEL_COLUMN_NAME = \"y\"\n",
"FORECAST_HORIZON = 14\n",
"FREQUENCY = \"D\"\n",
"\n",
"\n",
"def simulate_timeseries_data(\n",
" train_len: int,\n",
" test_len: int,\n",
" time_column_name: str,\n",
" target_column_name: str,\n",
" time_series_id_column_name: str,\n",
" time_series_number: int = 1,\n",
" freq: str = \"H\",\n",
"):\n",
" \"\"\"\n",
" Return the time series of designed length.\n",
"\n",
" :param train_len: The length of training data (one series).\n",
" :type train_len: int\n",
" :param test_len: The length of testing data (one series).\n",
" :type test_len: int\n",
" :param time_column_name: The desired name of a time column.\n",
" :type time_column_name: str\n",
" :param time_series_number: The number of time series in the data set.\n",
" :type time_series_number: int\n",
" :param freq: The frequency string representing pandas offset.\n",
" see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n",
" :type freq: str\n",
" :returns: the tuple of train and test data sets.\n",
" :rtype: tuple\n",
"\n",
" \"\"\"\n",
" data_train = [] # type: List[pd.DataFrame]\n",
" data_test = [] # type: List[pd.DataFrame]\n",
" data_length = train_len + test_len\n",
" for i in range(time_series_number):\n",
" X = pd.DataFrame(\n",
" {\n",
" time_column_name: pd.date_range(\n",
" start=\"2000-01-01\", periods=data_length, freq=freq\n",
" ),\n",
" target_column_name: np.arange(data_length).astype(float)\n",
" + np.random.rand(data_length)\n",
" + i * 5,\n",
" \"ext_predictor\": np.asarray(range(42, 42 + data_length)),\n",
" time_series_id_column_name: np.repeat(\"ts{}\".format(i), data_length),\n",
" }\n",
" )\n",
" data_train.append(X[:train_len])\n",
" data_test.append(X[train_len:])\n",
" train = pd.concat(data_train)\n",
" label_train = train.pop(target_column_name).values\n",
" test = pd.concat(data_test)\n",
" label_test = test.pop(target_column_name).values\n",
" return train, label_train, test, label_test\n",
"\n",
"\n",
"n_test_periods = FORECAST_HORIZON\n",
"n_train_periods = 365\n",
"X_train, y_train, X_test, y_test = simulate_timeseries_data(\n",
" train_len=n_train_periods,\n",
" test_len=n_test_periods,\n",
" time_column_name=TIME_COLUMN_NAME,\n",
" target_column_name=LABEL_COLUMN_NAME,\n",
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAMES,\n",
" time_series_number=2,\n",
" freq=FREQUENCY,\n",
")\n",
"X_train[LABEL_COLUMN_NAME] = y_train"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's see what the training data looks like."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_train.tail()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare remote compute and data. <a id=\"prepare_remote\"></a>\n",
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace), is paired with the storage account, which contains the default data store. We will use it to upload the artificial data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"ds = ws.get_default_datastore()\n",
"# Upload saved data to the default data store.\n",
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n",
" X_train, target=(ds, \"data\"), name=\"data_backtest\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You will need to create a compute target for backtesting. In this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute), you create AmlCompute as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"amlcompute_cluster_name = \"backtest-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create the configuration for AutoML backtesting <a id=\"train\"></a>\n",
"\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>normalized_root_mean_squared_error</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **max_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **grain_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_settings = {\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
" \"iterations\": 15,\n",
" \"experiment_timeout_hours\": 1, # This also needs to be changed based on the dataset. For larger data set this number needs to be bigger.\n",
" \"label_column_name\": LABEL_COLUMN_NAME,\n",
" \"n_cross_validations\": 3,\n",
" \"time_column_name\": TIME_COLUMN_NAME,\n",
" \"max_horizon\": FORECAST_HORIZON,\n",
" \"track_child_runs\": False,\n",
" \"grain_column_names\": TIME_SERIES_ID_COLUMN_NAMES,\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Backtest AutoML <a id=\"backtest_automl\"></a>\n",
"First we set backtesting parameters: we will step back by 30 days and will make 5 such steps; for each step we will forecast for next two weeks."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The number of periods to step back on each backtest iteration.\n",
"BACKTESTING_PERIOD = 30\n",
"# The number of times we will back test the model.\n",
"NUMBER_OF_BACKTESTS = 5"
]
},
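{
"cell_type": "markdown",
"metadata": {},
"source": [
"The optional sketch below only illustrates the resulting folds: it prints the last training date of each backtest iteration using the `X_train`, `TIME_COLUMN_NAME`, `BACKTESTING_PERIOD` and `NUMBER_OF_BACKTESTS` values defined above. It mirrors, but does not replace, the splitting performed by the pipeline's data split step, and it assumes that every simulated series spans the same dates."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative only: show the last training date of each backtest iteration.\n",
"# The actual split is performed by the pipeline's data_split step on the compute target.\n",
"unique_dates = pd.Series(sorted(X_train[TIME_COLUMN_NAME].unique()))\n",
"for i in range(NUMBER_OF_BACKTESTS):\n",
"    cutoff = unique_dates.iloc[len(unique_dates) - 1 - i * BACKTESTING_PERIOD]\n",
"    print(f\"Iteration {i}: last training date = {pd.Timestamp(cutoff).date()}\")"
]
},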
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To train AutoML on backtesting folds we will use the [Azure Machine Learning pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/concept-ml-pipelines). It will generate backtest folds, then train model for each of them and calculate the accuracy metrics. To run pipeline, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve (here, it is a forecasting), while a Run corresponds to a specific approach to the problem."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from uuid import uuid1\n",
"\n",
"from pipeline_helper import get_backtest_pipeline\n",
"\n",
"pipeline_exp = Experiment(ws, \"automl-backtesting\")\n",
"\n",
"# We will create the unique identifier to mark our models.\n",
"model_uid = str(uuid1())\n",
"\n",
"pipeline = get_backtest_pipeline(\n",
" experiment=pipeline_exp,\n",
" dataset=train_data,\n",
" # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.\n",
" process_per_node=2,\n",
" # The maximum number of nodes for our compute is 6.\n",
" node_count=6,\n",
" compute_target=compute_target,\n",
" automl_settings=automl_settings,\n",
" step_size=BACKTESTING_PERIOD,\n",
" step_number=NUMBER_OF_BACKTESTS,\n",
" model_uid=model_uid,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Run the pipeline and wait for results."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run = pipeline_exp.submit(pipeline)\n",
"pipeline_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"After the run is complete, we can download the results. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics_output = pipeline_run.get_pipeline_output(\"results\")\n",
"metrics_output.download(\"backtest_metrics\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View metrics<a id=\"Metrics\"></a>\n",
"To distinguish these metrics from the model backtest, which we will obtain in the next section, we will move the directory with metrics out of the backtest_metrics and will remove the parent folder. We will create the utility function for that."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def copy_scoring_directory(new_name):\n",
" scores_path = os.path.join(\"backtest_metrics\", \"azureml\")\n",
" directory_list = [os.path.join(scores_path, d) for d in os.listdir(scores_path)]\n",
" latest_file = max(directory_list, key=os.path.getctime)\n",
" print(\n",
" f\"The output directory {latest_file} was created on {pd.Timestamp(os.path.getctime(latest_file), unit='s')} GMT.\"\n",
" )\n",
" shutil.move(os.path.join(latest_file, \"results\"), new_name)\n",
" shutil.rmtree(\"backtest_metrics\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Move the directory and list its contents."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"copy_scoring_directory(\"automl_backtest\")\n",
"pd.DataFrame({\"File\": os.listdir(\"automl_backtest\")})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The directory contains a set of files with results:\n",
"- forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix\n",
"- scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series id are marked as \"all_sets\"\n",
"- plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and time series.\n",
"\n",
"For demonstration purposes we will display the table of metrics for one of the time series with ID \"ts0\". Again, we will create the utility function, which will be re used in model backtesting."
]
},
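{
"cell_type": "markdown",
"metadata": {},
"source": [
"This optional sanity check is a minimal sketch; it assumes the automl_backtest folder produced above and the `pandas` and `os` imports from the setup section."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: peek at the combined forecast file and list the backtest iterations it contains.\n",
"fcst_all = pd.read_csv(os.path.join(\"automl_backtest\", \"forecast.csv\"))\n",
"print(\"Backtest iterations:\", sorted(fcst_all[\"backtest_iteration\"].unique()))\n",
"fcst_all.head()"
]
},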
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_metrics_for_ts(all_metrics, ts):\n",
" \"\"\"\n",
" Get the metrics for the time series with ID ts and return it as pandas data frame.\n",
"\n",
" :param all_metrics: The table with all the metrics.\n",
" :param ts: The ID of a time series of interest.\n",
" :return: The pandas DataFrame with metrics for one time series.\n",
" \"\"\"\n",
" results_df = None\n",
" for ts_id, one_series in all_metrics.groupby(\"time_series_id\"):\n",
" if not ts_id.startswith(ts):\n",
" continue\n",
" iteration = ts_id.split(\"|\")[-1]\n",
" df = one_series[[\"metric_name\", \"metric\"]]\n",
" df.rename({\"metric\": iteration}, axis=1, inplace=True)\n",
" df.set_index(\"metric_name\", inplace=True)\n",
" if results_df is None:\n",
" results_df = df\n",
" else:\n",
" results_df = results_df.merge(\n",
" df, how=\"inner\", left_index=True, right_index=True\n",
" )\n",
" results_df.sort_index(axis=1, inplace=True)\n",
" return results_df\n",
"\n",
"\n",
"metrics_df = pd.read_csv(os.path.join(\"automl_backtest\", \"scores.csv\"))\n",
"ts_id = \"ts0\"\n",
"get_metrics_for_ts(metrics_df, ts_id)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Forecast vs actuals plots."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import IFrame\n",
"\n",
"IFrame(\"./automl_backtest/plots_fcst_vs_actual.pdf\", width=800, height=300)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# <font color='blue'>Backtest the best model</font> <a id=\"backtest_model\"></a>\n",
"\n",
"For model backtesting we will use the same parameters we used to backtest AutoML. All the models, we have obtained in the previous run were registered in our workspace. To identify the model, each was assigned a tag with the last trainig date."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_list = Model.list(ws, tags={\"experiment\": \"automl-backtesting\"})\n",
"model_data = {\"name\": [], \"last_training_date\": []}\n",
"for model in model_list:\n",
" if (\n",
" \"last_training_date\" not in model.tags\n",
" or \"model_uid\" not in model.tags\n",
" or model.tags[\"model_uid\"] != model_uid\n",
" ):\n",
" continue\n",
" model_data[\"name\"].append(model.name)\n",
" model_data[\"last_training_date\"].append(\n",
" pd.Timestamp(model.tags[\"last_training_date\"])\n",
" )\n",
"df_models = pd.DataFrame(model_data)\n",
"df_models.sort_values([\"last_training_date\"], inplace=True)\n",
"df_models.reset_index(inplace=True, drop=True)\n",
"df_models"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We will backtest the model trained on the most recet data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_name = df_models[\"name\"].iloc[-1]\n",
"model_name"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrain the models.\n",
"Assemble the pipeline, which will retrain the best model from AutoML run on historical data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_exp = Experiment(ws, \"model-backtesting\")\n",
"\n",
"pipeline = get_backtest_pipeline(\n",
" experiment=pipeline_exp,\n",
" dataset=train_data,\n",
" # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.\n",
" process_per_node=2,\n",
" # The maximum number of nodes for our compute is 6.\n",
" node_count=6,\n",
" compute_target=compute_target,\n",
" automl_settings=automl_settings,\n",
" step_size=BACKTESTING_PERIOD,\n",
" step_number=NUMBER_OF_BACKTESTS,\n",
" model_name=model_name,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Launch the backtesting pipeline."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run = pipeline_exp.submit(pipeline)\n",
"pipeline_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The metrics are stored in the pipeline output named \"score\". The next code will download the table with metrics."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics_output = pipeline_run.get_pipeline_output(\"results\")\n",
"metrics_output.download(\"backtest_metrics\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Again, we will copy the data files from the downloaded directory, but in this case we will call the folder \"model_backtest\"; it will contain the same files as the one for AutoML backtesting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"copy_scoring_directory(\"model_backtest\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we will display the metrics."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_metrics_df = pd.read_csv(os.path.join(\"model_backtest\", \"scores.csv\"))\n",
"get_metrics_for_ts(model_metrics_df, ts_id)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Forecast vs actuals plots."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import IFrame\n",
"\n",
"IFrame(\"./model_backtest/plots_fcst_vs_actual.pdf\", width=800, height=300)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"category": "tutorial",
"compute": [
"Remote"
],
"datasets": [
"None"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"Azure ML AutoML"
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-backtest-single-model
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,166 @@
from typing import Any, Dict, Optional
import os
import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru
from azureml._restclient.jasmine_client import JasmineClient
from azureml.contrib.automl.pipeline.steps import utilities
from azureml.core import RunConfiguration
from azureml.core.compute import ComputeTarget
from azureml.core.experiment import Experiment
from azureml.data import LinkTabularOutputDatasetConfig, TabularDataset
from azureml.pipeline.core import Pipeline, PipelineData, PipelineParameter
from azureml.pipeline.steps import ParallelRunConfig, ParallelRunStep, PythonScriptStep
from azureml.train.automl.constants import Scenarios
from azureml.data.dataset_consumption_config import DatasetConsumptionConfig
PROJECT_FOLDER = "assets"
SETTINGS_FILE = "automl_settings.json"
def get_backtest_pipeline(
experiment: Experiment,
dataset: TabularDataset,
process_per_node: int,
node_count: int,
compute_target: ComputeTarget,
automl_settings: Dict[str, Any],
step_size: int,
step_number: int,
model_name: Optional[str] = None,
model_uid: Optional[str] = None,
) -> Pipeline:
"""
:param experiment: The experiment used to run the pipeline.
:param dataset: Tabular data set to be used for model training.
:param process_per_node: The number of processes per node. Generally it should be the number of cores
on the node divided by two.
:param node_count: The number of nodes to be used.
:param compute_target: The compute target to be used to run the pipeline.
:param model_name: The name of a model to be back tested.
:param automl_settings: The dictionary with automl settings.
:param step_size: The number of periods to step back in backtesting.
:param step_number: The number of backtesting iterations.
:param model_uid: The uid to mark models from this run of the experiment.
:return: The pipeline to be used for model retraining.
**Note:** The output will be uploaded in the pipeline output
called 'results'.
"""
jasmine_client = JasmineClient(
service_context=experiment.workspace.service_context,
experiment_name=experiment.name,
experiment_id=experiment.id,
)
env = jasmine_client.get_curated_environment(
scenario=Scenarios.AUTOML,
enable_dnn=False,
enable_gpu=False,
compute=compute_target,
compute_sku=experiment.workspace.compute_targets.get(
compute_target.name
).vm_size,
)
data_results = PipelineData(
name="results", datastore=None, pipeline_output_name="results"
)
############################################################
# Split the data set using python script.
############################################################
run_config = RunConfiguration()
run_config.docker.use_docker = True
run_config.environment = env
split_data = PipelineData(name="split_data_output", datastore=None).as_dataset()
split_step = PythonScriptStep(
name="split_data_for_backtest",
script_name="data_split.py",
inputs=[dataset.as_named_input("training_data")],
outputs=[split_data],
source_directory=PROJECT_FOLDER,
arguments=[
"--step-size",
step_size,
"--step-number",
step_number,
"--time-column-name",
automl_settings.get("time_column_name"),
"--time-series-id-column-names",
automl_settings.get("grain_column_names"),
"--output-dir",
split_data,
],
runconfig=run_config,
compute_target=compute_target,
allow_reuse=False,
)
############################################################
# We will do the backtest in the parallel run step.
############################################################
settings_path = os.path.join(PROJECT_FOLDER, SETTINGS_FILE)
hru.dump_object_to_json(automl_settings, settings_path)
mini_batch_size = PipelineParameter(name="batch_size_param", default_value=str(1))
back_test_config = ParallelRunConfig(
source_directory=PROJECT_FOLDER,
entry_script="retrain_models.py",
mini_batch_size=mini_batch_size,
error_threshold=-1,
output_action="append_row",
append_row_file_name="outputs.txt",
compute_target=compute_target,
environment=env,
process_count_per_node=process_per_node,
run_invocation_timeout=3600,
node_count=node_count,
)
forecasts = PipelineData(name="forecasts", datastore=None)
if model_name:
parallel_step_name = "{}-backtest".format(model_name.replace("_", "-"))
else:
parallel_step_name = "AutoML-backtest"
prs_args = [
"--target_column_name",
automl_settings.get("label_column_name"),
"--output-dir",
forecasts,
]
if model_name is not None:
prs_args.append("--model-name")
prs_args.append(model_name)
if model_uid is not None:
prs_args.append("--model-uid")
prs_args.append(model_uid)
backtest_prs = ParallelRunStep(
name=parallel_step_name,
parallel_run_config=back_test_config,
arguments=prs_args,
inputs=[split_data],
output=forecasts,
allow_reuse=False,
)
############################################################
# Then we collect the output and return it as scores output.
############################################################
collection_step = PythonScriptStep(
name="score",
script_name="score.py",
inputs=[forecasts.as_mount()],
outputs=[data_results],
source_directory=PROJECT_FOLDER,
arguments=[
"--forecasts",
forecasts,
"--output-dir",
data_results,
],
runconfig=run_config,
compute_target=compute_target,
allow_reuse=False,
)
# Build and return the pipeline.
return Pipeline(
workspace=experiment.workspace,
steps=[split_step, backtest_prs, collection_step],
)

View File

@@ -113,7 +113,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -139,18 +139,18 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'beer-remote-cpu'\n",
"experiment_name = \"beer-remote-cpu\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -162,7 +162,9 @@
},
"source": [
"### Using AmlCompute\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource."
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
@@ -183,10 +185,11 @@
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
" max_nodes=4)\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -243,17 +246,21 @@
"plt.tight_layout()\n",
"\n",
"plt.subplot(2, 1, 1)\n",
"plt.title('Beer Production By Year')\n",
"df = pd.read_csv(\"Beer_no_valid_split_train.csv\", parse_dates=True, index_col= 'DATE').drop(columns='grain')\n",
"test_df = pd.read_csv(\"Beer_no_valid_split_test.csv\", parse_dates=True, index_col= 'DATE').drop(columns='grain')\n",
"plt.title(\"Beer Production By Year\")\n",
"df = pd.read_csv(\n",
" \"Beer_no_valid_split_train.csv\", parse_dates=True, index_col=\"DATE\"\n",
").drop(columns=\"grain\")\n",
"test_df = pd.read_csv(\n",
" \"Beer_no_valid_split_test.csv\", parse_dates=True, index_col=\"DATE\"\n",
").drop(columns=\"grain\")\n",
"plt.plot(df)\n",
"\n",
"plt.subplot(2, 1, 2)\n",
"plt.title('Beer Production By Month')\n",
"plt.title(\"Beer Production By Month\")\n",
"groups = df.groupby(df.index.month)\n",
"months = concat([DataFrame(x[1].values) for x in groups], axis=1)\n",
"months = DataFrame(months)\n",
"months.columns = range(1,13)\n",
"months.columns = range(1, 13)\n",
"months.boxplot()\n",
"\n",
"plt.show()"
@@ -268,10 +275,10 @@
},
"outputs": [],
"source": [
"target_column_name = 'BeerProduction'\n",
"time_column_name = 'DATE'\n",
"target_column_name = \"BeerProduction\"\n",
"time_column_name = \"DATE\"\n",
"time_series_id_column_names = []\n",
"freq = 'M' #Monthly data"
"freq = \"M\" # Monthly data"
]
},
{
@@ -299,14 +306,36 @@
"test_df.to_csv(\"test.csv\")\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"datastore.upload_files(files = ['./train.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
"datastore.upload_files(files = ['./valid.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
"datastore.upload_files(files = ['./test.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
"datastore.upload_files(\n",
" files=[\"./train.csv\"],\n",
" target_path=\"beer-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./valid.csv\"],\n",
" target_path=\"beer-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./test.csv\"],\n",
" target_path=\"beer-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"\n",
"from azureml.core import Dataset\n",
"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/train.csv')])\n",
"valid_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/valid.csv')])\n",
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])"
"\n",
"train_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"beer-dataset/tabular/train.csv\")]\n",
")\n",
"valid_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"beer-dataset/tabular/valid.csv\")]\n",
")\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"beer-dataset/tabular/test.csv\")]\n",
")"
]
},
{
@@ -364,22 +393,29 @@
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name, forecast_horizon=forecast_horizon\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n",
" freq=\"MS\", # Set the forecast frequency to be monthly (start of the month)\n",
")\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting', \n",
" primary_metric='normalized_root_mean_squared_error',\n",
" experiment_timeout_hours = 1,\n",
" training_data=train_dataset,\n",
" label_column_name=target_column_name,\n",
" validation_data=valid_dataset, \n",
" verbosity=logging.INFO,\n",
" compute_target=compute_target,\n",
" max_concurrent_iterations=4,\n",
" max_cores_per_iteration=-1,\n",
" enable_dnn=True,\n",
" forecasting_parameters=forecasting_parameters)"
"# We will disable the enable_early_stopping flag to ensure the DNN model is recommended for demonstration purpose.\n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" experiment_timeout_hours=1,\n",
" training_data=train_dataset,\n",
" label_column_name=target_column_name,\n",
" validation_data=valid_dataset,\n",
" verbosity=logging.INFO,\n",
" compute_target=compute_target,\n",
" max_concurrent_iterations=4,\n",
" max_cores_per_iteration=-1,\n",
" enable_dnn=True,\n",
" enable_early_stopping=False,\n",
" forecasting_parameters=forecasting_parameters,\n",
")"
]
},
{
@@ -401,8 +437,7 @@
},
"outputs": [],
"source": [
"remote_run = experiment.submit(automl_config, show_output= False)\n",
"remote_run"
"remote_run = experiment.submit(automl_config, show_output=True)"
]
},
{
@@ -419,15 +454,6 @@
"# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run.wait_for_completion()"
]
},
{
"cell_type": "markdown",
"metadata": {
@@ -459,6 +485,7 @@
"outputs": [],
"source": [
"from helper import get_result_df\n",
"\n",
"summary_df = get_result_df(remote_run)\n",
"summary_df"
]
@@ -474,11 +501,12 @@
"source": [
"from azureml.core.run import Run\n",
"from azureml.widgets import RunDetails\n",
"forecast_model = 'TCNForecaster'\n",
"if not forecast_model in summary_df['run_id']:\n",
" forecast_model = 'ForecastTCN'\n",
" \n",
"best_dnn_run_id = summary_df['run_id'][forecast_model]\n",
"\n",
"forecast_model = \"TCNForecaster\"\n",
"if not forecast_model in summary_df[\"run_id\"]:\n",
" forecast_model = \"ForecastTCN\"\n",
"\n",
"best_dnn_run_id = summary_df[\"run_id\"][forecast_model]\n",
"best_dnn_run = Run(experiment, best_dnn_run_id)"
]
},
@@ -492,7 +520,7 @@
"outputs": [],
"source": [
"best_dnn_run.parent\n",
"RunDetails(best_dnn_run.parent).show() "
"RunDetails(best_dnn_run.parent).show()"
]
},
{
@@ -505,7 +533,7 @@
"outputs": [],
"source": [
"best_dnn_run\n",
"RunDetails(best_dnn_run).show() "
"RunDetails(best_dnn_run).show()"
]
},
{
@@ -540,7 +568,10 @@
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])\n",
"\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"beer-dataset/tabular/test.csv\")]\n",
")\n",
"# preview the first 3 rows of the dataset\n",
"test_dataset.take(5).to_pandas_dataframe()"
]
@@ -551,7 +582,7 @@
"metadata": {},
"outputs": [],
"source": [
"compute_target = ws.compute_targets['beer-cluster']\n",
"compute_target = ws.compute_targets[\"beer-cluster\"]\n",
"test_experiment = Experiment(ws, experiment_name + \"_test\")"
]
},
@@ -567,9 +598,9 @@
"import os\n",
"import shutil\n",
"\n",
"script_folder = os.path.join(os.getcwd(), 'inference')\n",
"script_folder = os.path.join(os.getcwd(), \"inference\")\n",
"os.makedirs(script_folder, exist_ok=True)\n",
"shutil.copy('infer.py', script_folder)"
"shutil.copy(\"infer.py\", script_folder)"
]
},
{
@@ -580,8 +611,18 @@
"source": [
"from helper import run_inference\n",
"\n",
"test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run, test_dataset, valid_dataset, forecast_horizon,\n",
" target_column_name, time_column_name, freq)"
"test_run = run_inference(\n",
" test_experiment,\n",
" compute_target,\n",
" script_folder,\n",
" best_dnn_run,\n",
" test_dataset,\n",
" valid_dataset,\n",
" forecast_horizon,\n",
" target_column_name,\n",
" time_column_name,\n",
" freq,\n",
")"
]
},
{
@@ -601,8 +642,19 @@
"source": [
"from helper import run_multiple_inferences\n",
"\n",
"summary_df = run_multiple_inferences(summary_df, experiment, test_experiment, compute_target, script_folder, test_dataset, \n",
" valid_dataset, forecast_horizon, target_column_name, time_column_name, freq)"
"summary_df = run_multiple_inferences(\n",
" summary_df,\n",
" experiment,\n",
" test_experiment,\n",
" compute_target,\n",
" script_folder,\n",
" test_dataset,\n",
" valid_dataset,\n",
" forecast_horizon,\n",
" target_column_name,\n",
" time_column_name,\n",
" freq,\n",
")"
]
},
{
@@ -622,7 +674,7 @@
" test_run = Run(test_experiment, test_run_id)\n",
" test_run.wait_for_completion()\n",
" test_score = test_run.get_metrics()[run_summary.primary_metric]\n",
" summary_df.loc[summary_df.run_id == run_id, 'Test Score'] = test_score\n",
" summary_df.loc[summary_df.run_id == run_id, \"Test Score\"] = test_score\n",
" print(\"Test Score: \", test_score)"
]
},
@@ -668,7 +720,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
"version": "3.6.9"
}
},
"nbformat": 4,


@@ -0,0 +1,4 @@
name: auto-ml-forecasting-beer-remote
dependencies:
- pip:
- azureml-sdk
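For reference, a conda specification like the one above can be turned into an AzureML environment before submitting remote runs. A minimal sketch, assuming the spec is saved locally under the file name shown (an assumption, not part of this diff):

# Sketch only: build an Environment from a conda spec file (file name assumed).
from azureml.core import Environment

env = Environment.from_conda_specification(
    name="auto-ml-forecasting-beer-remote",
    file_path="auto-ml-forecasting-beer-remote.yml",
)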


@@ -6,120 +6,158 @@ from azureml.core.run import Run
from azureml.automl.core.shared import constants
def split_fraction_by_grain(df, fraction, time_column_name,
grain_column_names=None):
def split_fraction_by_grain(df, fraction, time_column_name, grain_column_names=None):
if not grain_column_names:
df['tmp_grain_column'] = 'grain'
grain_column_names = ['tmp_grain_column']
df["tmp_grain_column"] = "grain"
grain_column_names = ["tmp_grain_column"]
"""Group df by grain and split on last n rows for each group."""
df_grouped = (df.sort_values(time_column_name)
.groupby(grain_column_names, group_keys=False))
df_grouped = df.sort_values(time_column_name).groupby(
grain_column_names, group_keys=False
)
df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-int(len(dfg) *
fraction)] if fraction > 0 else dfg)
df_head = df_grouped.apply(
lambda dfg: dfg.iloc[: -int(len(dfg) * fraction)] if fraction > 0 else dfg
)
df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-int(len(dfg) *
fraction):] if fraction > 0 else dfg[:0])
df_tail = df_grouped.apply(
lambda dfg: dfg.iloc[-int(len(dfg) * fraction) :] if fraction > 0 else dfg[:0]
)
if 'tmp_grain_column' in grain_column_names:
if "tmp_grain_column" in grain_column_names:
for df2 in (df, df_head, df_tail):
df2.drop('tmp_grain_column', axis=1, inplace=True)
df2.drop("tmp_grain_column", axis=1, inplace=True)
grain_column_names.remove('tmp_grain_column')
grain_column_names.remove("tmp_grain_column")
return df_head, df_tail
def split_full_for_forecasting(df, time_column_name,
grain_column_names=None, test_split=0.2):
def split_full_for_forecasting(
df, time_column_name, grain_column_names=None, test_split=0.2
):
index_name = df.index.name
# Assumes that there isn't already a column called tmpindex
df['tmpindex'] = df.index
df["tmpindex"] = df.index
train_df, test_df = split_fraction_by_grain(
df, test_split, time_column_name, grain_column_names)
df, test_split, time_column_name, grain_column_names
)
train_df = train_df.set_index('tmpindex')
train_df = train_df.set_index("tmpindex")
train_df.index.name = index_name
test_df = test_df.set_index('tmpindex')
test_df = test_df.set_index("tmpindex")
test_df.index.name = index_name
df.drop('tmpindex', axis=1, inplace=True)
df.drop("tmpindex", axis=1, inplace=True)
return train_df, test_df
def get_result_df(remote_run):
children = list(remote_run.get_children(recursive=True))
summary_df = pd.DataFrame(index=['run_id', 'run_algorithm',
'primary_metric', 'Score'])
summary_df = pd.DataFrame(
index=["run_id", "run_algorithm", "primary_metric", "Score"]
)
goal_minimize = False
for run in children:
if run.get_status().lower() == constants.RunState.COMPLETE_RUN \
and 'run_algorithm' in run.properties and 'score' in run.properties:
if (
run.get_status().lower() == constants.RunState.COMPLETE_RUN
and "run_algorithm" in run.properties
and "score" in run.properties
):
# We only count in the completed child runs.
summary_df[run.id] = [run.id, run.properties['run_algorithm'],
run.properties['primary_metric'],
float(run.properties['score'])]
if ('goal' in run.properties):
goal_minimize = run.properties['goal'].split('_')[-1] == 'min'
summary_df[run.id] = [
run.id,
run.properties["run_algorithm"],
run.properties["primary_metric"],
float(run.properties["score"]),
]
if "goal" in run.properties:
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
summary_df = summary_df.T.sort_values(
'Score',
ascending=goal_minimize).drop_duplicates(['run_algorithm'])
summary_df = summary_df.set_index('run_algorithm')
"Score", ascending=goal_minimize
).drop_duplicates(["run_algorithm"])
summary_df = summary_df.set_index("run_algorithm")
return summary_df
def run_inference(test_experiment, compute_target, script_folder, train_run,
test_dataset, lookback_dataset, max_horizon,
target_column_name, time_column_name, freq):
model_base_name = 'model.pkl'
if 'model_data_location' in train_run.properties:
model_location = train_run.properties['model_data_location']
_, model_base_name = model_location.rsplit('/', 1)
train_run.download_file('outputs/{}'.format(model_base_name), 'inference/{}'.format(model_base_name))
train_run.download_file('outputs/conda_env_v_1_0_0.yml', 'inference/condafile.yml')
def run_inference(
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
):
model_base_name = "model.pkl"
if "model_data_location" in train_run.properties:
model_location = train_run.properties["model_data_location"]
_, model_base_name = model_location.rsplit("/", 1)
train_run.download_file(
"outputs/{}".format(model_base_name), "inference/{}".format(model_base_name)
)
train_run.download_file("outputs/conda_env_v_1_0_0.yml", "inference/condafile.yml")
inference_env = Environment("myenv")
inference_env.docker.enabled = True
inference_env.python.conda_dependencies = CondaDependencies(
conda_dependencies_file_path='inference/condafile.yml')
conda_dependencies_file_path="inference/condafile.yml"
)
est = Estimator(source_directory=script_folder,
entry_script='infer.py',
script_params={
'--max_horizon': max_horizon,
'--target_column_name': target_column_name,
'--time_column_name': time_column_name,
'--frequency': freq,
'--model_path': model_base_name
},
inputs=[test_dataset.as_named_input('test_data'),
lookback_dataset.as_named_input('lookback_data')],
compute_target=compute_target,
environment_definition=inference_env)
est = Estimator(
source_directory=script_folder,
entry_script="infer.py",
script_params={
"--max_horizon": max_horizon,
"--target_column_name": target_column_name,
"--time_column_name": time_column_name,
"--frequency": freq,
"--model_path": model_base_name,
},
inputs=[
test_dataset.as_named_input("test_data"),
lookback_dataset.as_named_input("lookback_data"),
],
compute_target=compute_target,
environment_definition=inference_env,
)
run = test_experiment.submit(
est, tags={
'training_run_id': train_run.id,
'run_algorithm': train_run.properties['run_algorithm'],
'valid_score': train_run.properties['score'],
'primary_metric': train_run.properties['primary_metric']
})
est,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags['run_algorithm'])
run.log("run_algorithm", run.tags["run_algorithm"])
return run
def run_multiple_inferences(summary_df, train_experiment, test_experiment,
compute_target, script_folder, test_dataset,
lookback_dataset, max_horizon, target_column_name,
time_column_name, freq):
def run_multiple_inferences(
summary_df,
train_experiment,
test_experiment,
compute_target,
script_folder,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
):
for run_name, run_summary in summary_df.iterrows():
print(run_name)
print(run_summary)
@@ -127,12 +165,19 @@ def run_multiple_inferences(summary_df, train_experiment, test_experiment,
train_run = Run(train_experiment, run_id)
test_run = run_inference(
test_experiment, compute_target, script_folder, train_run,
test_dataset, lookback_dataset, max_horizon, target_column_name,
time_column_name, freq)
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
)
print(test_run)
summary_df.loc[summary_df.run_id == run_id,
'test_run_id'] = test_run.id
summary_df.loc[summary_df.run_id == run_id, "test_run_id"] = test_run.id
return summary_df


@@ -19,9 +19,14 @@ except ImportError:
_torch_present = False
def align_outputs(y_predicted, X_trans, X_test, y_test,
predicted_column_name='predicted',
horizon_colname='horizon_origin'):
def align_outputs(
y_predicted,
X_trans,
X_test,
y_test,
predicted_column_name="predicted",
horizon_colname="horizon_origin",
):
"""
Demonstrates how to get the output aligned to the inputs
using pandas indexes. Helps understand what happened if
@@ -33,9 +38,13 @@ def align_outputs(y_predicted, X_trans, X_test, y_test,
* model was asked to predict past max_horizon -> increase max horizon
* data at start of X_test was needed for lags -> provide previous periods
"""
if (horizon_colname in X_trans):
df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname]})
if horizon_colname in X_trans:
df_fcst = pd.DataFrame(
{
predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname],
}
)
else:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
@@ -48,20 +57,21 @@ def align_outputs(y_predicted, X_trans, X_test, y_test,
# X_test_full's index does not include origin, so reset for merge
df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns='index')
together = df_fcst.merge(X_test_full, how='right')
X_test_full = X_test_full.reset_index().drop(columns="index")
together = df_fcst.merge(X_test_full, how="right")
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = together[together[[target_column_name,
predicted_column_name]].notnull().all(axis=1)]
return (clean)
clean = together[
together[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
return clean
def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
max_horizon, X_lookback, y_lookback,
freq='D'):
def do_rolling_forecast_with_lookback(
fitted_model, X_test, y_test, max_horizon, X_lookback, y_lookback, freq="D"
):
"""
Produce forecasts on a rolling origin over the given test set.
@@ -72,7 +82,7 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
origin time for constructing lag features.
This function returns a concatenated DataFrame of rolling forecasts.
"""
"""
print("Using lookback of size: ", y_lookback.size)
df_list = []
origin_time = X_test[time_column_name].min()
@@ -83,22 +93,28 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up-to the horizon
expand_wind = (X[time_column_name] < horizon_time)
expand_wind = X[time_column_name] < horizon_time
X_test_expand = X[expand_wind]
y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
y_query_expand.fill(np.NaN)
if origin_time != X[time_column_name].min():
# Set the context by including actuals up-to the origin time
test_context_expand_wind = (X[time_column_name] < origin_time)
context_expand_wind = (X_test_expand[time_column_name] < origin_time)
test_context_expand_wind = X[time_column_name] < origin_time
context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y[test_context_expand_wind]
# Print some debug info
print("Horizon_time:", horizon_time,
" origin_time: ", origin_time,
" max_horizon: ", max_horizon,
" freq: ", freq)
print(
"Horizon_time:",
horizon_time,
" origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
@@ -124,9 +140,14 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
df_list.append(align_outputs(
y_fcst[trans_roll_wind], X_trans[trans_roll_wind],
X[test_roll_wind], y[test_roll_wind]))
df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X[test_roll_wind],
y[test_roll_wind],
)
)
# Advance the origin time
origin_time = horizon_time
@@ -134,7 +155,7 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
return pd.concat(df_list, ignore_index=True)
def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq="D"):
"""
Produce forecasts on a rolling origin over the given test set.
@@ -145,7 +166,7 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
origin time for constructing lag features.
This function returns a concatenated DataFrame of rolling forecasts.
"""
"""
df_list = []
origin_time = X_test[time_column_name].min()
while origin_time <= X_test[time_column_name].max():
@@ -153,23 +174,28 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up-to the horizon
expand_wind = (X_test[time_column_name] < horizon_time)
expand_wind = X_test[time_column_name] < horizon_time
X_test_expand = X_test[expand_wind]
y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
y_query_expand.fill(np.NaN)
if origin_time != X_test[time_column_name].min():
# Set the context by including actuals up-to the origin time
test_context_expand_wind = (X_test[time_column_name] < origin_time)
context_expand_wind = (X_test_expand[time_column_name] < origin_time)
y_query_expand[context_expand_wind] = y_test[
test_context_expand_wind]
test_context_expand_wind = X_test[time_column_name] < origin_time
context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y_test[test_context_expand_wind]
# Print some debug info
print("Horizon_time:", horizon_time,
" origin_time: ", origin_time,
" max_horizon: ", max_horizon,
" freq: ", freq)
print(
"Horizon_time:",
horizon_time,
" origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
@@ -193,10 +219,14 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X_test[time_column_name] >= origin_time)
df_list.append(align_outputs(y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X_test[test_roll_wind],
y_test[test_roll_wind]))
df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X_test[test_roll_wind],
y_test[test_roll_wind],
)
)
# Advance the origin time
origin_time = horizon_time
@@ -230,20 +260,31 @@ def map_location_cuda(storage, loc):
parser = argparse.ArgumentParser()
parser.add_argument(
'--max_horizon', type=int, dest='max_horizon',
default=10, help='Max Horizon for forecasting')
"--max_horizon",
type=int,
dest="max_horizon",
default=10,
help="Max Horizon for forecasting",
)
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
'--time_column_name', type=str, dest='time_column_name',
help='Time Column Name')
"--time_column_name", type=str, dest="time_column_name", help="Time Column Name"
)
parser.add_argument(
'--frequency', type=str, dest='freq',
help='Frequency of prediction')
"--frequency", type=str, dest="freq", help="Frequency of prediction"
)
parser.add_argument(
'--model_path', type=str, dest='model_path',
default='model.pkl', help='Filename of model to be loaded')
"--model_path",
type=str,
dest="model_path",
default="model.pkl",
help="Filename of model to be loaded",
)
args = parser.parse_args()
max_horizon = args.max_horizon
@@ -252,7 +293,7 @@ time_column_name = args.time_column_name
freq = args.freq
model_path = args.model_path
print('args passed are: ')
print("args passed are: ")
print(max_horizon)
print(target_column_name)
print(time_column_name)
@@ -261,39 +302,41 @@ print(model_path)
run = Run.get_context()
# get input dataset by name
test_dataset = run.input_datasets['test_data']
lookback_dataset = run.input_datasets['lookback_data']
test_dataset = run.input_datasets["test_data"]
lookback_dataset = run.input_datasets["lookback_data"]
grain_column_names = []
df = test_dataset.to_pandas_dataframe()
print('Read df')
print("Read df")
print(df)
X_test_df = test_dataset.drop_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns(
None).keep_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(
columns=[target_column_name]
)
X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(
None).keep_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(None).keep_columns(
columns=[target_column_name]
)
_, ext = os.path.splitext(model_path)
if ext == '.pt':
if ext == ".pt":
# Load the fc-tcn torch model.
assert _torch_present
if torch.cuda.is_available():
map_location = map_location_cuda
else:
map_location = 'cpu'
with open(model_path, 'rb') as fh:
map_location = "cpu"
with open(model_path, "rb") as fh:
fitted_model = torch.load(fh, map_location=map_location)
else:
# Load the sklearn pipeline.
fitted_model = joblib.load(model_path)
if hasattr(fitted_model, 'get_lookback'):
if hasattr(fitted_model, "get_lookback"):
lookback = fitted_model.get_lookback()
df_all = do_rolling_forecast_with_lookback(
fitted_model,
@@ -302,26 +345,28 @@ if hasattr(fitted_model, 'get_lookback'):
max_horizon,
X_lookback_df.to_pandas_dataframe()[-lookback:],
y_lookback_df.to_pandas_dataframe().values.T[0][-lookback:],
freq)
freq,
)
else:
df_all = do_rolling_forecast(
fitted_model,
X_test_df.to_pandas_dataframe(),
y_test_df.to_pandas_dataframe().values.T[0],
max_horizon,
freq)
freq,
)
print(df_all)
print("target values:::")
print(df_all[target_column_name])
print("predicted values:::")
print(df_all['predicted'])
print(df_all["predicted"])
# Use the AutoML scoring module
regression_metrics = list(constants.REGRESSION_SCALAR_SET)
y_test = np.array(df_all[target_column_name])
y_pred = np.array(df_all['predicted'])
y_pred = np.array(df_all["predicted"])
scores = scoring.score_regression(y_test, y_pred, regression_metrics)
print("scores:")
@@ -331,12 +376,11 @@ for key, value in scores.items():
run.log(key, value)
print("Simple forecasting model")
rmse = np.sqrt(mean_squared_error(
df_all[target_column_name], df_all['predicted']))
rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all["predicted"]))
print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
mae = mean_absolute_error(df_all[target_column_name], df_all['predicted'])
print('mean_absolute_error score: %.2f' % mae)
print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted']))
mae = mean_absolute_error(df_all[target_column_name], df_all["predicted"])
print("mean_absolute_error score: %.2f" % mae)
print("MAPE: %.2f" % MAPE(df_all[target_column_name], df_all["predicted"]))
run.log('rmse', rmse)
run.log('mae', mae)
run.log("rmse", rmse)
run.log("mae", mae)


@@ -0,0 +1,4 @@
name: auto-ml-forecasting-bike-share
dependencies:
- pip:
- azureml-sdk


@@ -4,11 +4,14 @@ from sklearn.externals import joblib
parser = argparse.ArgumentParser()
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
'--test_dataset', type=str, dest='test_dataset',
help='Test Dataset')
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)
args = parser.parse_args()
target_column_name = args.target_column_name
@@ -20,19 +23,30 @@ ws = run.experiment.workspace
# get the input dataset by id
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
X_test_df = test_dataset.drop_columns(columns=[target_column_name]).to_pandas_dataframe().reset_index(drop=True)
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(columns=[target_column_name]).to_pandas_dataframe()
X_test_df = (
test_dataset.drop_columns(columns=[target_column_name])
.to_pandas_dataframe()
.reset_index(drop=True)
)
y_test_df = (
test_dataset.with_timestamp_columns(None)
.keep_columns(columns=[target_column_name])
.to_pandas_dataframe()
)
fitted_model = joblib.load('model.pkl')
fitted_model = joblib.load("model.pkl")
y_pred, X_trans = fitted_model.rolling_evaluation(X_test_df, y_test_df.values)
# Add predictions, actuals, and horizon relative to rolling origin to the test feature data
assign_dict = {'horizon_origin': X_trans['horizon_origin'].values, 'predicted': y_pred,
target_column_name: y_test_df[target_column_name].values}
assign_dict = {
"horizon_origin": X_trans["horizon_origin"].values,
"predicted": y_pred,
target_column_name: y_test_df[target_column_name].values,
}
df_all = X_test_df.assign(**assign_dict)
file_name = 'outputs/predictions.csv'
file_name = "outputs/predictions.csv"
export_csv = df_all.to_csv(file_name, header=True)
# Upload the predictions into artifacts


@@ -1,32 +1,40 @@
from azureml.core import ScriptRunConfig
def run_rolling_forecast(test_experiment, compute_target, train_run,
test_dataset, target_column_name,
inference_folder='./forecast'):
train_run.download_file('outputs/model.pkl',
inference_folder + '/model.pkl')
def run_rolling_forecast(
test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
train_run.download_file("outputs/model.pkl", inference_folder + "/model.pkl")
inference_env = train_run.get_environment()
config = ScriptRunConfig(source_directory=inference_folder,
script='forecasting_script.py',
arguments=['--target_column_name',
target_column_name,
'--test_dataset',
test_dataset.as_named_input(test_dataset.name)],
compute_target=compute_target,
environment=inference_env)
config = ScriptRunConfig(
source_directory=inference_folder,
script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name,
"--test_dataset",
test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target,
environment=inference_env,
)
run = test_experiment.submit(config,
tags={'training_run_id':
train_run.id,
'run_algorithm':
train_run.properties['run_algorithm'],
'valid_score':
train_run.properties['score'],
'primary_metric':
train_run.properties['primary_metric']})
run = test_experiment.submit(
config,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags['run_algorithm'])
run.log("run_algorithm", run.tags["run_algorithm"])
return run
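A typical call site for this helper would look roughly like the following. This is a hedged sketch: `remote_run`, `compute_target`, `test_dataset`, and the other names are assumptions carried over from an AutoML training notebook, not code from this diff.

# Hedged usage sketch for run_rolling_forecast (surrounding objects assumed).
from azureml.core import Experiment

test_experiment = Experiment(ws, experiment_name + "_test")
best_run, fitted_model = remote_run.get_output()  # best AutoML child run

backtest_run = run_rolling_forecast(
    test_experiment,
    compute_target,
    best_run,
    test_dataset,          # a registered TabularDataset covering the test period
    target_column_name,
    # note: the default './forecast' folder must already contain forecasting_script.py
)
backtest_run.wait_for_completion(show_output=False)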


@@ -0,0 +1,4 @@
name: auto-ml-forecasting-energy-demand
dependencies:
- pip:
- azureml-sdk


@@ -1,44 +0,0 @@
import pandas as pd
import numpy as np
from pandas.tseries.frequencies import to_offset
def align_outputs(y_predicted, X_trans, X_test, y_test, target_column_name,
predicted_column_name='predicted',
horizon_colname='horizon_origin'):
"""
Demonstrates how to get the output aligned to the inputs
using pandas indexes. Helps understand what happened if
the output's shape differs from the input shape, or if
the data got re-sorted by time and grain during forecasting.
Typical causes of misalignment are:
* we predicted some periods that were missing in actuals -> drop from eval
* model was asked to predict past max_horizon -> increase max horizon
* data at start of X_test was needed for lags -> provide previous periods
"""
if (horizon_colname in X_trans):
df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname]})
else:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
# y and X outputs are aligned by forecast() function contract
df_fcst.index = X_trans.index
# align original X_test to y_test
X_test_full = X_test.copy()
X_test_full[target_column_name] = y_test
# X_test_full's index does not include origin, so reset for merge
df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns='index')
together = df_fcst.merge(X_test_full, how='right')
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = together[together[[target_column_name,
predicted_column_name]].notnull().all(axis=1)]
return(clean)


@@ -0,0 +1,61 @@
"""
This is the script that is executed on the compute instance. It relies
on the model.pkl file which is uploaded along with this script to the
compute instance.
"""
import argparse
from azureml.core import Dataset, Run
from sklearn.externals import joblib
from pandas.tseries.frequencies import to_offset
parser = argparse.ArgumentParser()
parser.add_argument(
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)
args = parser.parse_args()
target_column_name = args.target_column_name
test_dataset_id = args.test_dataset
run = Run.get_context()
ws = run.experiment.workspace
# get the input dataset by id
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
y_test = X_test.pop(target_column_name).values
# generate forecast
fitted_model = joblib.load("model.pkl")
# The default quantile values below yield a 95% prediction interval (2.5th, 50th and 97.5th percentiles)
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
file_name = "outputs/predictions.csv"
export_csv = clean.to_csv(file_name, header=True, index=False)  # do not write the row index
# Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name)
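Once the run submitted with this script completes, the uploaded predictions can be pulled back and inspected locally. A small sketch: `inference_run` is assumed to be the Run object returned by whichever helper submitted this script, and `target_column_name` is assumed from the calling notebook.

# Sketch: read back the predictions written by the script above (names assumed).
import pandas as pd
from sklearn.metrics import mean_absolute_error

inference_run.wait_for_completion(show_output=False)
inference_run.download_file("outputs/predictions.csv", "predictions.csv")

pred_df = pd.read_csv("predictions.csv")
print(pred_df[["predicted", "prediction_interval"]].head())
print("MAE:", mean_absolute_error(pred_df[target_column_name], pred_df["predicted"]))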


@@ -1,22 +0,0 @@
import pandas as pd
import numpy as np
def APE(actual, pred):
"""
Calculate absolute percentage error.
Returns a vector of APE values with same length as actual/pred.
"""
return 100 * np.abs((actual - pred) / actual)
def MAPE(actual, pred):
"""
Calculate mean absolute percentage error.
Remove NA and values where actual is close to zero
"""
not_na = ~(np.isnan(actual) | np.isnan(pred))
not_zero = ~np.isclose(actual, 0.0)
actual_safe = actual[not_na & not_zero]
pred_safe = pred[not_na & not_zero]
return np.mean(APE(actual_safe, pred_safe))
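For reference, the MAPE computed by this (now removed) helper averages the absolute percentage errors after dropping NaNs and near-zero actuals. A tiny worked example with made-up numbers:

# Worked example of the MAPE definition above (hypothetical values).
import numpy as np

actual = np.array([100.0, 200.0, 0.0, np.nan])
pred = np.array([110.0, 190.0, 5.0, 50.0])

# keep points where actual is non-zero and neither value is NaN
mask = ~(np.isnan(actual) | np.isnan(pred)) & ~np.isclose(actual, 0.0)
mape = np.mean(100 * np.abs((actual[mask] - pred[mask]) / actual[mask]))
print(mape)  # (10% + 5%) / 2 = 7.5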


@@ -0,0 +1,49 @@
import os
import shutil
from azureml.core import ScriptRunConfig
def run_remote_inference(
test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
# Create a local directory to copy the model.pkl and forecasting_script.py files into.
# These files will be uploaded to and executed on the compute instance.
os.makedirs(inference_folder, exist_ok=True)
shutil.copy("forecasting_script.py", inference_folder)
train_run.download_file(
"outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
)
inference_env = train_run.get_environment()
config = ScriptRunConfig(
source_directory=inference_folder,
script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name,
"--test_dataset",
test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target,
environment=inference_env,
)
run = test_experiment.submit(
config,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags["run_algorithm"])
return run
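Calling this helper follows the same pattern as the other inference helpers in these samples. A brief, hedged sketch; `ws`, `experiment_name`, `compute_target`, `best_run`, `test_dataset`, and `target_column_name` are assumed to come from the notebook that uses it, and forecasting_script.py must sit in the working directory so the helper can copy it.

# Hedged sketch of invoking run_remote_inference (surrounding objects assumed).
from azureml.core import Experiment

remote_inference_run = run_remote_inference(
    test_experiment=Experiment(ws, experiment_name + "_test"),
    compute_target=compute_target,
    train_run=best_run,              # completed AutoML child run with outputs/model.pkl
    test_dataset=test_dataset,       # registered TabularDataset (needs a name for as_named_input)
    target_column_name=target_column_name,
)
remote_inference_run.wait_for_completion(show_output=False)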


@@ -0,0 +1,4 @@
name: auto-ml-forecasting-function
dependencies:
- pip:
- azureml-sdk


@@ -0,0 +1,725 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.png)"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"# Automated Machine Learning\n",
"**Github DAU Forecasting**\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Data](#Data)\n",
"1. [Train](#Train)\n",
"1. [Evaluate](#Evaluate)"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Introduction\n",
"This notebook demonstrates demand forecasting for Github Daily Active Users Dataset using AutoML.\n",
"\n",
"AutoML highlights here include using Deep Learning forecasts, Arima, Prophet, Remote Execution and Remote Inferencing, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"Notebook synopsis:\n",
"\n",
"1. Creating an Experiment in an existing Workspace\n",
"2. Configuration and remote run of AutoML for a time-series model exploring Regression learners, Arima, Prophet and DNNs\n",
"4. Evaluating the fitted model using a rolling test "
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Setup\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"import os\n",
"import azureml.core\n",
"import pandas as pd\n",
"import numpy as np\n",
"import logging\n",
"import warnings\n",
"\n",
"from pandas.tseries.frequencies import to_offset\n",
"\n",
"# Squash warning messages for cleaner output in the notebook\n",
"warnings.showwarning = lambda *args, **kwargs: None\n",
"\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.train.automl import AutoMLConfig\n",
"from matplotlib import pyplot as plt\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
"from azureml.train.estimator import Estimator"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"As part of the setup you have already created a <b>Workspace</b>. To run AutoML, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = \"github-remote-cpu\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"### Using AmlCompute\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"cpu_cluster_name = \"github-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Data\n",
"Read Github DAU data from file, and preview data."
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"Let's set up what we know about the dataset. \n",
"\n",
"**Target column** is what we want to forecast.\n",
"\n",
"**Time column** is the time axis along which to predict.\n",
"\n",
"**Time series identifier columns** are identified by values of the columns listed `time_series_id_column_names`, for example \"store\" and \"item\" if your data has multiple time series of sales, one series for each combination of store and item sold.\n",
"\n",
"**Forecast frequency (freq)** This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.\n",
"\n",
"This dataset has only one time series. Please see the [orange juice notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales) for an example of a multi-time series dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"import pandas as pd\n",
"from pandas import DataFrame\n",
"from pandas import Grouper\n",
"from pandas import concat\n",
"from pandas.plotting import register_matplotlib_converters\n",
"\n",
"register_matplotlib_converters()\n",
"plt.figure(figsize=(20, 10))\n",
"plt.tight_layout()\n",
"\n",
"plt.subplot(2, 1, 1)\n",
"plt.title(\"Github Daily Active User By Year\")\n",
"df = pd.read_csv(\"github_dau_2011-2018_train.csv\", parse_dates=True, index_col=\"date\")\n",
"test_df = pd.read_csv(\n",
" \"github_dau_2011-2018_test.csv\", parse_dates=True, index_col=\"date\"\n",
")\n",
"plt.plot(df)\n",
"\n",
"plt.subplot(2, 1, 2)\n",
"plt.title(\"Github Daily Active User By Month\")\n",
"groups = df.groupby(df.index.month)\n",
"months = concat([DataFrame(x[1].values) for x in groups], axis=1)\n",
"months = DataFrame(months)\n",
"months.columns = range(1, 49)\n",
"months.boxplot()\n",
"\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"target_column_name = \"count\"\n",
"time_column_name = \"date\"\n",
"time_series_id_column_names = []\n",
"freq = \"D\" # Daily data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Split Training data into Train and Validation set and Upload to Datastores"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from helper import split_fraction_by_grain\n",
"from helper import split_full_for_forecasting\n",
"\n",
"train, valid = split_full_for_forecasting(df, time_column_name)\n",
"train.to_csv(\"train.csv\")\n",
"valid.to_csv(\"valid.csv\")\n",
"test_df.to_csv(\"test.csv\")\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"datastore.upload_files(\n",
" files=[\"./train.csv\"],\n",
" target_path=\"github-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./valid.csv\"],\n",
" target_path=\"github-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./test.csv\"],\n",
" target_path=\"github-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"\n",
"from azureml.core import Dataset\n",
"\n",
"train_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/train.csv\")]\n",
")\n",
"valid_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/valid.csv\")]\n",
")\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/test.csv\")]\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"### Setting forecaster maximum horizon \n",
"\n",
"The forecast horizon is the number of periods into the future that the model should predict. Here, we set the horizon to 12 periods (i.e. 12 months). Notice that this is much shorter than the number of months in the test set; we will need to use a rolling test to evaluate the performance on the whole test set. For more discussion of forecast horizons and guiding principles for setting them, please see the [energy demand notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand). "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"forecast_horizon = 12"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Train\n",
"\n",
"Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|forecasting|\n",
"|**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>\n",
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
"|**training_data**|Input dataset, containing both features and label column.|\n",
"|**label_column_name**|The name of the label column.|\n",
"|**enable_dnn**|Enable Forecasting DNNs|\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n",
" freq=\"D\", # Set the forecast frequency to be daily\n",
")\n",
"\n",
"# We will disable the enable_early_stopping flag to ensure the DNN model is recommended for demonstration purpose.\n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" experiment_timeout_hours=1,\n",
" training_data=train_dataset,\n",
" label_column_name=target_column_name,\n",
" validation_data=valid_dataset,\n",
" verbosity=logging.INFO,\n",
" compute_target=compute_target,\n",
" max_concurrent_iterations=4,\n",
" max_cores_per_iteration=-1,\n",
" enable_dnn=True,\n",
" enable_early_stopping=False,\n",
" forecasting_parameters=forecasting_parameters,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"We will now run the experiment, starting with 10 iterations of model search. The experiment can be continued for more iterations if more accurate results are required. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"remote_run = experiment.submit(automl_config, show_output=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"# If you need to retrieve a run that already started, use the following code\n",
"# from azureml.train.automl.run import AutoMLRun\n",
"# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"Displaying the run objects gives you links to the visual tools in the Azure Portal. Go try them!"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"### Retrieve the Best Model for Each Algorithm\n",
"Below we select the best pipeline from our iterations. The get_output method on automl_classifier returns the best run and the fitted model for the last fit invocation. There are overloads on get_output that allow you to retrieve the best run and fitted model for any logged metric or a particular iteration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from helper import get_result_df\n",
"\n",
"summary_df = get_result_df(remote_run)\n",
"summary_df"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from azureml.core.run import Run\n",
"from azureml.widgets import RunDetails\n",
"\n",
"forecast_model = \"TCNForecaster\"\n",
"if not forecast_model in summary_df[\"run_id\"]:\n",
" forecast_model = \"ForecastTCN\"\n",
"\n",
"best_dnn_run_id = summary_df[\"run_id\"][forecast_model]\n",
"best_dnn_run = Run(experiment, best_dnn_run_id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"best_dnn_run.parent\n",
"RunDetails(best_dnn_run.parent).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"best_dnn_run\n",
"RunDetails(best_dnn_run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Evaluate on Test Data"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"We now use the best fitted model from the AutoML Run to make forecasts for the test set. \n",
"\n",
"We always score on the original dataset whose schema matches the training set schema."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/test.csv\")]\n",
")\n",
"# preview the first 3 rows of the dataset\n",
"test_dataset.take(5).to_pandas_dataframe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"compute_target = ws.compute_targets[\"github-cluster\"]\n",
"test_experiment = Experiment(ws, experiment_name + \"_test\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"import os\n",
"import shutil\n",
"\n",
"script_folder = os.path.join(os.getcwd(), \"inference\")\n",
"os.makedirs(script_folder, exist_ok=True)\n",
"shutil.copy(\"infer.py\", script_folder)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from helper import run_inference\n",
"\n",
"test_run = run_inference(\n",
" test_experiment,\n",
" compute_target,\n",
" script_folder,\n",
" best_dnn_run,\n",
" test_dataset,\n",
" valid_dataset,\n",
" forecast_horizon,\n",
" target_column_name,\n",
" time_column_name,\n",
" freq,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"RunDetails(test_run).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from helper import run_multiple_inferences\n",
"\n",
"summary_df = run_multiple_inferences(\n",
" summary_df,\n",
" experiment,\n",
" test_experiment,\n",
" compute_target,\n",
" script_folder,\n",
" test_dataset,\n",
" valid_dataset,\n",
" forecast_horizon,\n",
" target_column_name,\n",
" time_column_name,\n",
" freq,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"for run_name, run_summary in summary_df.iterrows():\n",
" print(run_name)\n",
" print(run_summary)\n",
" run_id = run_summary.run_id\n",
" test_run_id = run_summary.test_run_id\n",
" test_run = Run(test_experiment, test_run_id)\n",
" test_run.wait_for_completion()\n",
" test_score = test_run.get_metrics()[run_summary.primary_metric]\n",
" summary_df.loc[summary_df.run_id == run_id, \"Test Score\"] = test_score\n",
" print(\"Test Score: \", test_score)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"summary_df"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"hide_code_all_hidden": false,
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1,455 @@
date,count,day_of_week,month_of_year,holiday
2017-06-04,104663,6.0,5.0,0.0
2017-06-05,155824,0.0,5.0,0.0
2017-06-06,164908,1.0,5.0,0.0
2017-06-07,170309,2.0,5.0,0.0
2017-06-08,164256,3.0,5.0,0.0
2017-06-09,153406,4.0,5.0,0.0
2017-06-10,97024,5.0,5.0,0.0
2017-06-11,103442,6.0,5.0,0.0
2017-06-12,160768,0.0,5.0,0.0
2017-06-13,166288,1.0,5.0,0.0
2017-06-14,163819,2.0,5.0,0.0
2017-06-15,157593,3.0,5.0,0.0
2017-06-16,149259,4.0,5.0,0.0
2017-06-17,95579,5.0,5.0,0.0
2017-06-18,98723,6.0,5.0,0.0
2017-06-19,159076,0.0,5.0,0.0
2017-06-20,163340,1.0,5.0,0.0
2017-06-21,163344,2.0,5.0,0.0
2017-06-22,159528,3.0,5.0,0.0
2017-06-23,146563,4.0,5.0,0.0
2017-06-24,92631,5.0,5.0,0.0
2017-06-25,96549,6.0,5.0,0.0
2017-06-26,153249,0.0,5.0,0.0
2017-06-27,160357,1.0,5.0,0.0
2017-06-28,159941,2.0,5.0,0.0
2017-06-29,156781,3.0,5.0,0.0
2017-06-30,144709,4.0,5.0,0.0
2017-07-01,89101,5.0,6.0,0.0
2017-07-02,93046,6.0,6.0,0.0
2017-07-03,144113,0.0,6.0,0.0
2017-07-04,143061,1.0,6.0,1.0
2017-07-05,154603,2.0,6.0,0.0
2017-07-06,157200,3.0,6.0,0.0
2017-07-07,147213,4.0,6.0,0.0
2017-07-08,92348,5.0,6.0,0.0
2017-07-09,97018,6.0,6.0,0.0
2017-07-10,157192,0.0,6.0,0.0
2017-07-11,161819,1.0,6.0,0.0
2017-07-12,161998,2.0,6.0,0.0
2017-07-13,160280,3.0,6.0,0.0
2017-07-14,146818,4.0,6.0,0.0
2017-07-15,93041,5.0,6.0,0.0
2017-07-16,97505,6.0,6.0,0.0
2017-07-17,156167,0.0,6.0,0.0
2017-07-18,162855,1.0,6.0,0.0
2017-07-19,162519,2.0,6.0,0.0
2017-07-20,159941,3.0,6.0,0.0
2017-07-21,148460,4.0,6.0,0.0
2017-07-22,93431,5.0,6.0,0.0
2017-07-23,98553,6.0,6.0,0.0
2017-07-24,156202,0.0,6.0,0.0
2017-07-25,162503,1.0,6.0,0.0
2017-07-26,158479,2.0,6.0,0.0
2017-07-27,158192,3.0,6.0,0.0
2017-07-28,147108,4.0,6.0,0.0
2017-07-29,93799,5.0,6.0,0.0
2017-07-30,97920,6.0,6.0,0.0
2017-07-31,152197,0.0,6.0,0.0
2017-08-01,158477,1.0,7.0,0.0
2017-08-02,159089,2.0,7.0,0.0
2017-08-03,157182,3.0,7.0,0.0
2017-08-04,146345,4.0,7.0,0.0
2017-08-05,92534,5.0,7.0,0.0
2017-08-06,97128,6.0,7.0,0.0
2017-08-07,151359,0.0,7.0,0.0
2017-08-08,159895,1.0,7.0,0.0
2017-08-09,158329,2.0,7.0,0.0
2017-08-10,155468,3.0,7.0,0.0
2017-08-11,144914,4.0,7.0,0.0
2017-08-12,92258,5.0,7.0,0.0
2017-08-13,95933,6.0,7.0,0.0
2017-08-14,147706,0.0,7.0,0.0
2017-08-15,151115,1.0,7.0,0.0
2017-08-16,157640,2.0,7.0,0.0
2017-08-17,156600,3.0,7.0,0.0
2017-08-18,146980,4.0,7.0,0.0
2017-08-19,94592,5.0,7.0,0.0
2017-08-20,99320,6.0,7.0,0.0
2017-08-21,145727,0.0,7.0,0.0
2017-08-22,160260,1.0,7.0,0.0
2017-08-23,160440,2.0,7.0,0.0
2017-08-24,157830,3.0,7.0,0.0
2017-08-25,145822,4.0,7.0,0.0
2017-08-26,94706,5.0,7.0,0.0
2017-08-27,99047,6.0,7.0,0.0
2017-08-28,152112,0.0,7.0,0.0
2017-08-29,162440,1.0,7.0,0.0
2017-08-30,162902,2.0,7.0,0.0
2017-08-31,159498,3.0,7.0,0.0
2017-09-01,145689,4.0,8.0,0.0
2017-09-02,93589,5.0,8.0,0.0
2017-09-03,100058,6.0,8.0,0.0
2017-09-04,140865,0.0,8.0,1.0
2017-09-05,165715,1.0,8.0,0.0
2017-09-06,167463,2.0,8.0,0.0
2017-09-07,164811,3.0,8.0,0.0
2017-09-08,156157,4.0,8.0,0.0
2017-09-09,101358,5.0,8.0,0.0
2017-09-10,107915,6.0,8.0,0.0
2017-09-11,167845,0.0,8.0,0.0
2017-09-12,172756,1.0,8.0,0.0
2017-09-13,172851,2.0,8.0,0.0
2017-09-14,171675,3.0,8.0,0.0
2017-09-15,159266,4.0,8.0,0.0
2017-09-16,103547,5.0,8.0,0.0
2017-09-17,110964,6.0,8.0,0.0
2017-09-18,170976,0.0,8.0,0.0
2017-09-19,177864,1.0,8.0,0.0
2017-09-20,173567,2.0,8.0,0.0
2017-09-21,172017,3.0,8.0,0.0
2017-09-22,161357,4.0,8.0,0.0
2017-09-23,104681,5.0,8.0,0.0
2017-09-24,111711,6.0,8.0,0.0
2017-09-25,173517,0.0,8.0,0.0
2017-09-26,180049,1.0,8.0,0.0
2017-09-27,178307,2.0,8.0,0.0
2017-09-28,174157,3.0,8.0,0.0
2017-09-29,161707,4.0,8.0,0.0
2017-09-30,110536,5.0,8.0,0.0
2017-10-01,106505,6.0,9.0,0.0
2017-10-02,157565,0.0,9.0,0.0
2017-10-03,164764,1.0,9.0,0.0
2017-10-04,163383,2.0,9.0,0.0
2017-10-05,162847,3.0,9.0,0.0
2017-10-06,153575,4.0,9.0,0.0
2017-10-07,107472,5.0,9.0,0.0
2017-10-08,116127,6.0,9.0,0.0
2017-10-09,174457,0.0,9.0,1.0
2017-10-10,185217,1.0,9.0,0.0
2017-10-11,185120,2.0,9.0,0.0
2017-10-12,180844,3.0,9.0,0.0
2017-10-13,170178,4.0,9.0,0.0
2017-10-14,112754,5.0,9.0,0.0
2017-10-15,121251,6.0,9.0,0.0
2017-10-16,183906,0.0,9.0,0.0
2017-10-17,188945,1.0,9.0,0.0
2017-10-18,187297,2.0,9.0,0.0
2017-10-19,183867,3.0,9.0,0.0
2017-10-20,173021,4.0,9.0,0.0
2017-10-21,115851,5.0,9.0,0.0
2017-10-22,126088,6.0,9.0,0.0
2017-10-23,189452,0.0,9.0,0.0
2017-10-24,194412,1.0,9.0,0.0
2017-10-25,192293,2.0,9.0,0.0
2017-10-26,190163,3.0,9.0,0.0
2017-10-27,177053,4.0,9.0,0.0
2017-10-28,114934,5.0,9.0,0.0
2017-10-29,125289,6.0,9.0,0.0
2017-10-30,189245,0.0,9.0,0.0
2017-10-31,191480,1.0,9.0,0.0
2017-11-01,182281,2.0,10.0,0.0
2017-11-02,186351,3.0,10.0,0.0
2017-11-03,175422,4.0,10.0,0.0
2017-11-04,118160,5.0,10.0,0.0
2017-11-05,127602,6.0,10.0,0.0
2017-11-06,191067,0.0,10.0,0.0
2017-11-07,197083,1.0,10.0,0.0
2017-11-08,194333,2.0,10.0,0.0
2017-11-09,193914,3.0,10.0,0.0
2017-11-10,179933,4.0,10.0,1.0
2017-11-11,121346,5.0,10.0,0.0
2017-11-12,131900,6.0,10.0,0.0
2017-11-13,196969,0.0,10.0,0.0
2017-11-14,201949,1.0,10.0,0.0
2017-11-15,198424,2.0,10.0,0.0
2017-11-16,196902,3.0,10.0,0.0
2017-11-17,183893,4.0,10.0,0.0
2017-11-18,122767,5.0,10.0,0.0
2017-11-19,130890,6.0,10.0,0.0
2017-11-20,194515,0.0,10.0,0.0
2017-11-21,198601,1.0,10.0,0.0
2017-11-22,191041,2.0,10.0,0.0
2017-11-23,170321,3.0,10.0,1.0
2017-11-24,155623,4.0,10.0,0.0
2017-11-25,115759,5.0,10.0,0.0
2017-11-26,128771,6.0,10.0,0.0
2017-11-27,199419,0.0,10.0,0.0
2017-11-28,207253,1.0,10.0,0.0
2017-11-29,205406,2.0,10.0,0.0
2017-11-30,200674,3.0,10.0,0.0
2017-12-01,187017,4.0,11.0,0.0
2017-12-02,129735,5.0,11.0,0.0
2017-12-03,139120,6.0,11.0,0.0
2017-12-04,205505,0.0,11.0,0.0
2017-12-05,208218,1.0,11.0,0.0
2017-12-06,202480,2.0,11.0,0.0
2017-12-07,197822,3.0,11.0,0.0
2017-12-08,180686,4.0,11.0,0.0
2017-12-09,123667,5.0,11.0,0.0
2017-12-10,130987,6.0,11.0,0.0
2017-12-11,193901,0.0,11.0,0.0
2017-12-12,194997,1.0,11.0,0.0
2017-12-13,192063,2.0,11.0,0.0
2017-12-14,186496,3.0,11.0,0.0
2017-12-15,170812,4.0,11.0,0.0
2017-12-16,110474,5.0,11.0,0.0
2017-12-17,118165,6.0,11.0,0.0
2017-12-18,176843,0.0,11.0,0.0
2017-12-19,179550,1.0,11.0,0.0
2017-12-20,173506,2.0,11.0,0.0
2017-12-21,165910,3.0,11.0,0.0
2017-12-22,145886,4.0,11.0,0.0
2017-12-23,95246,5.0,11.0,0.0
2017-12-24,88781,6.0,11.0,0.0
2017-12-25,98189,0.0,11.0,1.0
2017-12-26,121383,1.0,11.0,0.0
2017-12-27,135300,2.0,11.0,0.0
2017-12-28,136827,3.0,11.0,0.0
2017-12-29,127700,4.0,11.0,0.0
2017-12-30,93014,5.0,11.0,0.0
2017-12-31,82878,6.0,11.0,0.0
2018-01-01,86419,0.0,0.0,1.0
2018-01-02,147428,1.0,0.0,0.0
2018-01-03,162193,2.0,0.0,0.0
2018-01-04,163784,3.0,0.0,0.0
2018-01-05,158606,4.0,0.0,0.0
2018-01-06,113467,5.0,0.0,0.0
2018-01-07,118313,6.0,0.0,0.0
2018-01-08,175623,0.0,0.0,0.0
2018-01-09,183880,1.0,0.0,0.0
2018-01-10,183945,2.0,0.0,0.0
2018-01-11,181769,3.0,0.0,0.0
2018-01-12,170552,4.0,0.0,0.0
2018-01-13,115707,5.0,0.0,0.0
2018-01-14,121191,6.0,0.0,0.0
2018-01-15,176127,0.0,0.0,1.0
2018-01-16,188032,1.0,0.0,0.0
2018-01-17,189871,2.0,0.0,0.0
2018-01-18,189348,3.0,0.0,0.0
2018-01-19,177456,4.0,0.0,0.0
2018-01-20,123321,5.0,0.0,0.0
2018-01-21,128306,6.0,0.0,0.0
2018-01-22,186132,0.0,0.0,0.0
2018-01-23,197618,1.0,0.0,0.0
2018-01-24,196402,2.0,0.0,0.0
2018-01-25,192722,3.0,0.0,0.0
2018-01-26,179415,4.0,0.0,0.0
2018-01-27,125769,5.0,0.0,0.0
2018-01-28,133306,6.0,0.0,0.0
2018-01-29,194151,0.0,0.0,0.0
2018-01-30,198680,1.0,0.0,0.0
2018-01-31,198652,2.0,0.0,0.0
2018-02-01,195472,3.0,1.0,0.0
2018-02-02,183173,4.0,1.0,0.0
2018-02-03,124276,5.0,1.0,0.0
2018-02-04,129054,6.0,1.0,0.0
2018-02-05,190024,0.0,1.0,0.0
2018-02-06,198658,1.0,1.0,0.0
2018-02-07,198272,2.0,1.0,0.0
2018-02-08,195339,3.0,1.0,0.0
2018-02-09,183086,4.0,1.0,0.0
2018-02-10,122536,5.0,1.0,0.0
2018-02-11,133033,6.0,1.0,0.0
2018-02-12,185386,0.0,1.0,0.0
2018-02-13,184789,1.0,1.0,0.0
2018-02-14,176089,2.0,1.0,0.0
2018-02-15,171317,3.0,1.0,0.0
2018-02-16,162693,4.0,1.0,0.0
2018-02-17,116342,5.0,1.0,0.0
2018-02-18,122466,6.0,1.0,0.0
2018-02-19,172364,0.0,1.0,1.0
2018-02-20,185896,1.0,1.0,0.0
2018-02-21,188166,2.0,1.0,0.0
2018-02-22,189427,3.0,1.0,0.0
2018-02-23,178732,4.0,1.0,0.0
2018-02-24,132664,5.0,1.0,0.0
2018-02-25,134008,6.0,1.0,0.0
2018-02-26,200075,0.0,1.0,0.0
2018-02-27,207996,1.0,1.0,0.0
2018-02-28,204416,2.0,1.0,0.0
2018-03-01,201320,3.0,2.0,0.0
2018-03-02,188205,4.0,2.0,0.0
2018-03-03,131162,5.0,2.0,0.0
2018-03-04,138320,6.0,2.0,0.0
2018-03-05,207326,0.0,2.0,0.0
2018-03-06,212462,1.0,2.0,0.0
2018-03-07,209357,2.0,2.0,0.0
2018-03-08,194876,3.0,2.0,0.0
2018-03-09,193761,4.0,2.0,0.0
2018-03-10,133449,5.0,2.0,0.0
2018-03-11,142258,6.0,2.0,0.0
2018-03-12,208753,0.0,2.0,0.0
2018-03-13,210602,1.0,2.0,0.0
2018-03-14,214236,2.0,2.0,0.0
2018-03-15,210761,3.0,2.0,0.0
2018-03-16,196619,4.0,2.0,0.0
2018-03-17,133056,5.0,2.0,0.0
2018-03-18,141335,6.0,2.0,0.0
2018-03-19,211580,0.0,2.0,0.0
2018-03-20,219051,1.0,2.0,0.0
2018-03-21,215435,2.0,2.0,0.0
2018-03-22,211961,3.0,2.0,0.0
2018-03-23,196009,4.0,2.0,0.0
2018-03-24,132390,5.0,2.0,0.0
2018-03-25,140021,6.0,2.0,0.0
2018-03-26,205273,0.0,2.0,0.0
2018-03-27,212686,1.0,2.0,0.0
2018-03-28,210683,2.0,2.0,0.0
2018-03-29,189044,3.0,2.0,0.0
2018-03-30,170256,4.0,2.0,0.0
2018-03-31,125999,5.0,2.0,0.0
2018-04-01,126749,6.0,3.0,0.0
2018-04-02,186546,0.0,3.0,0.0
2018-04-03,207905,1.0,3.0,0.0
2018-04-04,201528,2.0,3.0,0.0
2018-04-05,188580,3.0,3.0,0.0
2018-04-06,173714,4.0,3.0,0.0
2018-04-07,125723,5.0,3.0,0.0
2018-04-08,142545,6.0,3.0,0.0
2018-04-09,204767,0.0,3.0,0.0
2018-04-10,212048,1.0,3.0,0.0
2018-04-11,210517,2.0,3.0,0.0
2018-04-12,206924,3.0,3.0,0.0
2018-04-13,191679,4.0,3.0,0.0
2018-04-14,126394,5.0,3.0,0.0
2018-04-15,137279,6.0,3.0,0.0
2018-04-16,208085,0.0,3.0,0.0
2018-04-17,213273,1.0,3.0,0.0
2018-04-18,211580,2.0,3.0,0.0
2018-04-19,206037,3.0,3.0,0.0
2018-04-20,191211,4.0,3.0,0.0
2018-04-21,125564,5.0,3.0,0.0
2018-04-22,136469,6.0,3.0,0.0
2018-04-23,206288,0.0,3.0,0.0
2018-04-24,212115,1.0,3.0,0.0
2018-04-25,207948,2.0,3.0,0.0
2018-04-26,205759,3.0,3.0,0.0
2018-04-27,181330,4.0,3.0,0.0
2018-04-28,130046,5.0,3.0,0.0
2018-04-29,120802,6.0,3.0,0.0
2018-04-30,170390,0.0,3.0,0.0
2018-05-01,169054,1.0,4.0,0.0
2018-05-02,197891,2.0,4.0,0.0
2018-05-03,199820,3.0,4.0,0.0
2018-05-04,186783,4.0,4.0,0.0
2018-05-05,124420,5.0,4.0,0.0
2018-05-06,130666,6.0,4.0,0.0
2018-05-07,196014,0.0,4.0,0.0
2018-05-08,203058,1.0,4.0,0.0
2018-05-09,198582,2.0,4.0,0.0
2018-05-10,191321,3.0,4.0,0.0
2018-05-11,183639,4.0,4.0,0.0
2018-05-12,122023,5.0,4.0,0.0
2018-05-13,128775,6.0,4.0,0.0
2018-05-14,199104,0.0,4.0,0.0
2018-05-15,200658,1.0,4.0,0.0
2018-05-16,201541,2.0,4.0,0.0
2018-05-17,196886,3.0,4.0,0.0
2018-05-18,188597,4.0,4.0,0.0
2018-05-19,121392,5.0,4.0,0.0
2018-05-20,126981,6.0,4.0,0.0
2018-05-21,189291,0.0,4.0,0.0
2018-05-22,203038,1.0,4.0,0.0
2018-05-23,205330,2.0,4.0,0.0
2018-05-24,199208,3.0,4.0,0.0
2018-05-25,187768,4.0,4.0,0.0
2018-05-26,117635,5.0,4.0,0.0
2018-05-27,124352,6.0,4.0,0.0
2018-05-28,180398,0.0,4.0,1.0
2018-05-29,194170,1.0,4.0,0.0
2018-05-30,200281,2.0,4.0,0.0
2018-05-31,197244,3.0,4.0,0.0
2018-06-01,184037,4.0,5.0,0.0
2018-06-02,121135,5.0,5.0,0.0
2018-06-03,129389,6.0,5.0,0.0
2018-06-04,200331,0.0,5.0,0.0
2018-06-05,207735,1.0,5.0,0.0
2018-06-06,203354,2.0,5.0,0.0
2018-06-07,200520,3.0,5.0,0.0
2018-06-08,182038,4.0,5.0,0.0
2018-06-09,120164,5.0,5.0,0.0
2018-06-10,125256,6.0,5.0,0.0
2018-06-11,194786,0.0,5.0,0.0
2018-06-12,200815,1.0,5.0,0.0
2018-06-13,197740,2.0,5.0,0.0
2018-06-14,192294,3.0,5.0,0.0
2018-06-15,173587,4.0,5.0,0.0
2018-06-16,105955,5.0,5.0,0.0
2018-06-17,110780,6.0,5.0,0.0
2018-06-18,174582,0.0,5.0,0.0
2018-06-19,193310,1.0,5.0,0.0
2018-06-20,193062,2.0,5.0,0.0
2018-06-21,187986,3.0,5.0,0.0
2018-06-22,173606,4.0,5.0,0.0
2018-06-23,111795,5.0,5.0,0.0
2018-06-24,116134,6.0,5.0,0.0
2018-06-25,185919,0.0,5.0,0.0
2018-06-26,193142,1.0,5.0,0.0
2018-06-27,188114,2.0,5.0,0.0
2018-06-28,183737,3.0,5.0,0.0
2018-06-29,171496,4.0,5.0,0.0
2018-06-30,107210,5.0,5.0,0.0
2018-07-01,111053,6.0,6.0,0.0
2018-07-02,176198,0.0,6.0,0.0
2018-07-03,184040,1.0,6.0,0.0
2018-07-04,169783,2.0,6.0,1.0
2018-07-05,177996,3.0,6.0,0.0
2018-07-06,167378,4.0,6.0,0.0
2018-07-07,106401,5.0,6.0,0.0
2018-07-08,112327,6.0,6.0,0.0
2018-07-09,182835,0.0,6.0,0.0
2018-07-10,187694,1.0,6.0,0.0
2018-07-11,185762,2.0,6.0,0.0
2018-07-12,184099,3.0,6.0,0.0
2018-07-13,170860,4.0,6.0,0.0
2018-07-14,106799,5.0,6.0,0.0
2018-07-15,108475,6.0,6.0,0.0
2018-07-16,175704,0.0,6.0,0.0
2018-07-17,183596,1.0,6.0,0.0
2018-07-18,179897,2.0,6.0,0.0
2018-07-19,183373,3.0,6.0,0.0
2018-07-20,169626,4.0,6.0,0.0
2018-07-21,106785,5.0,6.0,0.0
2018-07-22,112387,6.0,6.0,0.0
2018-07-23,180572,0.0,6.0,0.0
2018-07-24,186943,1.0,6.0,0.0
2018-07-25,185744,2.0,6.0,0.0
2018-07-26,183117,3.0,6.0,0.0
2018-07-27,168526,4.0,6.0,0.0
2018-07-28,105936,5.0,6.0,0.0
2018-07-29,111708,6.0,6.0,0.0
2018-07-30,179950,0.0,6.0,0.0
2018-07-31,185930,1.0,6.0,0.0
2018-08-01,183366,2.0,7.0,0.0
2018-08-02,182412,3.0,7.0,0.0
2018-08-03,173429,4.0,7.0,0.0
2018-08-04,106108,5.0,7.0,0.0
2018-08-05,110059,6.0,7.0,0.0
2018-08-06,178355,0.0,7.0,0.0
2018-08-07,185518,1.0,7.0,0.0
2018-08-08,183204,2.0,7.0,0.0
2018-08-09,181276,3.0,7.0,0.0
2018-08-10,168297,4.0,7.0,0.0
2018-08-11,106488,5.0,7.0,0.0
2018-08-12,111786,6.0,7.0,0.0
2018-08-13,178620,0.0,7.0,0.0
2018-08-14,181922,1.0,7.0,0.0
2018-08-15,172198,2.0,7.0,0.0
2018-08-16,177367,3.0,7.0,0.0
2018-08-17,166550,4.0,7.0,0.0
2018-08-18,107011,5.0,7.0,0.0
2018-08-19,112299,6.0,7.0,0.0
2018-08-20,176718,0.0,7.0,0.0
2018-08-21,182562,1.0,7.0,0.0
2018-08-22,181484,2.0,7.0,0.0
2018-08-23,180317,3.0,7.0,0.0
2018-08-24,170197,4.0,7.0,0.0
2018-08-25,109383,5.0,7.0,0.0
2018-08-26,113373,6.0,7.0,0.0
2018-08-27,180142,0.0,7.0,0.0
2018-08-28,191628,1.0,7.0,0.0
2018-08-29,191149,2.0,7.0,0.0
2018-08-30,187503,3.0,7.0,0.0
2018-08-31,172280,4.0,7.0,0.0

View File

@@ -0,0 +1,183 @@
import pandas as pd
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.train.estimator import Estimator
from azureml.core.run import Run
from azureml.automl.core.shared import constants
def split_fraction_by_grain(df, fraction, time_column_name, grain_column_names=None):
    """Group df by grain and split on last n rows for each group."""
    if not grain_column_names:
        df["tmp_grain_column"] = "grain"
        grain_column_names = ["tmp_grain_column"]
df_grouped = df.sort_values(time_column_name).groupby(
grain_column_names, group_keys=False
)
df_head = df_grouped.apply(
lambda dfg: dfg.iloc[: -int(len(dfg) * fraction)] if fraction > 0 else dfg
)
df_tail = df_grouped.apply(
lambda dfg: dfg.iloc[-int(len(dfg) * fraction) :] if fraction > 0 else dfg[:0]
)
if "tmp_grain_column" in grain_column_names:
for df2 in (df, df_head, df_tail):
df2.drop("tmp_grain_column", axis=1, inplace=True)
grain_column_names.remove("tmp_grain_column")
return df_head, df_tail
def split_full_for_forecasting(
df, time_column_name, grain_column_names=None, test_split=0.2
):
index_name = df.index.name
# Assumes that there isn't already a column called tmpindex
df["tmpindex"] = df.index
train_df, test_df = split_fraction_by_grain(
df, test_split, time_column_name, grain_column_names
)
train_df = train_df.set_index("tmpindex")
train_df.index.name = index_name
test_df = test_df.set_index("tmpindex")
test_df.index.name = index_name
df.drop("tmpindex", axis=1, inplace=True)
return train_df, test_df
def get_result_df(remote_run):
children = list(remote_run.get_children(recursive=True))
summary_df = pd.DataFrame(
index=["run_id", "run_algorithm", "primary_metric", "Score"]
)
goal_minimize = False
for run in children:
if (
run.get_status().lower() == constants.RunState.COMPLETE_RUN
and "run_algorithm" in run.properties
and "score" in run.properties
):
# We only count in the completed child runs.
summary_df[run.id] = [
run.id,
run.properties["run_algorithm"],
run.properties["primary_metric"],
float(run.properties["score"]),
]
if "goal" in run.properties:
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
summary_df = summary_df.T.sort_values(
"Score", ascending=goal_minimize
).drop_duplicates(["run_algorithm"])
summary_df = summary_df.set_index("run_algorithm")
return summary_df
def run_inference(
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
):
model_base_name = "model.pkl"
if "model_data_location" in train_run.properties:
model_location = train_run.properties["model_data_location"]
_, model_base_name = model_location.rsplit("/", 1)
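    # Download the fitted model and its conda environment file from the training run's outputs.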
train_run.download_file(
"outputs/{}".format(model_base_name), "inference/{}".format(model_base_name)
)
train_run.download_file("outputs/conda_env_v_1_0_0.yml", "inference/condafile.yml")
inference_env = Environment("myenv")
inference_env.docker.enabled = True
inference_env.python.conda_dependencies = CondaDependencies(
conda_dependencies_file_path="inference/condafile.yml"
)
est = Estimator(
source_directory=script_folder,
entry_script="infer.py",
script_params={
"--max_horizon": max_horizon,
"--target_column_name": target_column_name,
"--time_column_name": time_column_name,
"--frequency": freq,
"--model_path": model_base_name,
},
inputs=[
test_dataset.as_named_input("test_data"),
lookback_dataset.as_named_input("lookback_data"),
],
compute_target=compute_target,
environment_definition=inference_env,
)
run = test_experiment.submit(
est,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags["run_algorithm"])
return run
def run_multiple_inferences(
summary_df,
train_experiment,
test_experiment,
compute_target,
script_folder,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
):
for run_name, run_summary in summary_df.iterrows():
print(run_name)
print(run_summary)
run_id = run_summary.run_id
train_run = Run(train_experiment, run_id)
test_run = run_inference(
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
)
print(test_run)
summary_df.loc[summary_df.run_id == run_id, "test_run_id"] = test_run.id
return summary_df

View File

@@ -0,0 +1,386 @@
import argparse
import os
import numpy as np
import pandas as pd
from pandas.tseries.frequencies import to_offset
from sklearn.externals import joblib
from sklearn.metrics import mean_absolute_error, mean_squared_error
from azureml.automl.runtime.shared.score import scoring, constants
from azureml.core import Run
try:
import torch
_torch_present = True
except ImportError:
_torch_present = False
def align_outputs(
y_predicted,
X_trans,
X_test,
y_test,
predicted_column_name="predicted",
horizon_colname="horizon_origin",
):
"""
Demonstrates how to get the output aligned to the inputs
using pandas indexes. Helps understand what happened if
the output's shape differs from the input shape, or if
the data got re-sorted by time and grain during forecasting.
Typical causes of misalignment are:
* we predicted some periods that were missing in actuals -> drop from eval
* model was asked to predict past max_horizon -> increase max horizon
* data at start of X_test was needed for lags -> provide previous periods
"""
if horizon_colname in X_trans:
df_fcst = pd.DataFrame(
{
predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname],
}
)
else:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
# y and X outputs are aligned by forecast() function contract
df_fcst.index = X_trans.index
# align original X_test to y_test
X_test_full = X_test.copy()
X_test_full[target_column_name] = y_test
# X_test_full's index does not include origin, so reset for merge
df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns="index")
together = df_fcst.merge(X_test_full, how="right")
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = together[
together[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
return clean
def do_rolling_forecast_with_lookback(
fitted_model, X_test, y_test, max_horizon, X_lookback, y_lookback, freq="D"
):
"""
Produce forecasts on a rolling origin over the given test set.
Each iteration makes a forecast for the next 'max_horizon' periods
with respect to the current origin, then advances the origin by the
horizon time duration. The prediction context for each forecast is set so
that the forecaster uses the actual target values prior to the current
origin time for constructing lag features.
This function returns a concatenated DataFrame of rolling forecasts.
"""
print("Using lookback of size: ", y_lookback.size)
df_list = []
origin_time = X_test[time_column_name].min()
X = X_lookback.append(X_test)
y = np.concatenate((y_lookback, y_test), axis=0)
while origin_time <= X_test[time_column_name].max():
# Set the horizon time - end date of the forecast
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up-to the horizon
expand_wind = X[time_column_name] < horizon_time
X_test_expand = X[expand_wind]
y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
y_query_expand.fill(np.NaN)
if origin_time != X[time_column_name].min():
# Set the context by including actuals up-to the origin time
test_context_expand_wind = X[time_column_name] < origin_time
context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y[test_context_expand_wind]
# Print some debug info
print(
"Horizon_time:",
horizon_time,
" origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
print("X_test")
print(X)
print("X_test_expand")
print(X_test_expand)
print("Type of X_test_expand: ", type(X_test_expand))
print("Type of y_query_expand: ", type(y_query_expand))
print("y_query_expand")
print(y_query_expand)
# Make a forecast out to the maximum horizon
# y_fcst, X_trans = y_query_expand, X_test_expand
y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)
print("y_fcst")
print(y_fcst)
# Align forecast with test set for dates within
# the current rolling window
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X[test_roll_wind],
y[test_roll_wind],
)
)
# Advance the origin time
origin_time = horizon_time
return pd.concat(df_list, ignore_index=True)
def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq="D"):
"""
Produce forecasts on a rolling origin over the given test set.
Each iteration makes a forecast for the next 'max_horizon' periods
with respect to the current origin, then advances the origin by the
horizon time duration. The prediction context for each forecast is set so
that the forecaster uses the actual target values prior to the current
origin time for constructing lag features.
This function returns a concatenated DataFrame of rolling forecasts.
"""
df_list = []
origin_time = X_test[time_column_name].min()
while origin_time <= X_test[time_column_name].max():
# Set the horizon time - end date of the forecast
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up-to the horizon
expand_wind = X_test[time_column_name] < horizon_time
X_test_expand = X_test[expand_wind]
y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
y_query_expand.fill(np.NaN)
if origin_time != X_test[time_column_name].min():
# Set the context by including actuals up-to the origin time
test_context_expand_wind = X_test[time_column_name] < origin_time
context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y_test[test_context_expand_wind]
# Print some debug info
print(
"Horizon_time:",
horizon_time,
" origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
print("X_test")
print(X_test)
print("X_test_expand")
print(X_test_expand)
print("Type of X_test_expand: ", type(X_test_expand))
print("Type of y_query_expand: ", type(y_query_expand))
print("y_query_expand")
print(y_query_expand)
# Make a forecast out to the maximum horizon
y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)
print("y_fcst")
print(y_fcst)
# Align forecast with test set for dates within the
# current rolling window
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X_test[time_column_name] >= origin_time)
df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X_test[test_roll_wind],
y_test[test_roll_wind],
)
)
# Advance the origin time
origin_time = horizon_time
return pd.concat(df_list, ignore_index=True)
def APE(actual, pred):
"""
Calculate absolute percentage error.
Returns a vector of APE values with same length as actual/pred.
"""
return 100 * np.abs((actual - pred) / actual)
def MAPE(actual, pred):
"""
Calculate mean absolute percentage error.
Remove NA and values where actual is close to zero
"""
not_na = ~(np.isnan(actual) | np.isnan(pred))
not_zero = ~np.isclose(actual, 0.0)
actual_safe = actual[not_na & not_zero]
pred_safe = pred[not_na & not_zero]
return np.mean(APE(actual_safe, pred_safe))
def map_location_cuda(storage, loc):
return storage.cuda()
parser = argparse.ArgumentParser()
parser.add_argument(
"--max_horizon",
type=int,
dest="max_horizon",
default=10,
help="Max Horizon for forecasting",
)
parser.add_argument(
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
"--time_column_name", type=str, dest="time_column_name", help="Time Column Name"
)
parser.add_argument(
"--frequency", type=str, dest="freq", help="Frequency of prediction"
)
parser.add_argument(
"--model_path",
type=str,
dest="model_path",
default="model.pkl",
help="Filename of model to be loaded",
)
args = parser.parse_args()
max_horizon = args.max_horizon
target_column_name = args.target_column_name
time_column_name = args.time_column_name
freq = args.freq
model_path = args.model_path
print("args passed are: ")
print(max_horizon)
print(target_column_name)
print(time_column_name)
print(freq)
print(model_path)
run = Run.get_context()
# get input dataset by name
test_dataset = run.input_datasets["test_data"]
lookback_dataset = run.input_datasets["lookback_data"]
grain_column_names = []
df = test_dataset.to_pandas_dataframe()
print("Read df")
print(df)
X_test_df = test_dataset.drop_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(
columns=[target_column_name]
)
X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(None).keep_columns(
columns=[target_column_name]
)
_, ext = os.path.splitext(model_path)
if ext == ".pt":
# Load the fc-tcn torch model.
    assert _torch_present, "PyTorch is required to load a .pt (TCN) model."
if torch.cuda.is_available():
map_location = map_location_cuda
else:
map_location = "cpu"
with open(model_path, "rb") as fh:
fitted_model = torch.load(fh, map_location=map_location)
else:
# Load the sklearn pipeline.
fitted_model = joblib.load(model_path)
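# Forecasters that expose get_lookback (e.g. the TCN model) need trailing actuals from the lookback dataset as prediction context.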
if hasattr(fitted_model, "get_lookback"):
lookback = fitted_model.get_lookback()
df_all = do_rolling_forecast_with_lookback(
fitted_model,
X_test_df.to_pandas_dataframe(),
y_test_df.to_pandas_dataframe().values.T[0],
max_horizon,
X_lookback_df.to_pandas_dataframe()[-lookback:],
y_lookback_df.to_pandas_dataframe().values.T[0][-lookback:],
freq,
)
else:
df_all = do_rolling_forecast(
fitted_model,
X_test_df.to_pandas_dataframe(),
y_test_df.to_pandas_dataframe().values.T[0],
max_horizon,
freq,
)
print(df_all)
print("target values:::")
print(df_all[target_column_name])
print("predicted values:::")
print(df_all["predicted"])
# Use the AutoML scoring module
regression_metrics = list(constants.REGRESSION_SCALAR_SET)
y_test = np.array(df_all[target_column_name])
y_pred = np.array(df_all["predicted"])
scores = scoring.score_regression(y_test, y_pred, regression_metrics)
print("scores:")
print(scores)
for key, value in scores.items():
run.log(key, value)
print("Simple forecasting model")
rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all["predicted"]))
print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
mae = mean_absolute_error(df_all[target_column_name], df_all["predicted"])
print("mean_absolute_error score: %.2f" % mae)
print("MAPE: %.2f" % MAPE(df_all[target_column_name], df_all["predicted"]))
run.log("rmse", rmse)
run.log("mae", mae)

View File

@@ -0,0 +1,94 @@
---
page_type: sample
languages:
- python
products:
- azure-machine-learning
description: Tutorial showing how to solve complex machine learning time series forecasting problems at scale by using Azure Automated ML and the hierarchical time series accelerator.
---
## Microsoft Solution Accelerator: Hierarchical Time Series Forecasting
In most applications, customers need to understand their forecasts at both a macro and a micro level of the business. Whether it is predicting sales of products at different geographic locations, or understanding the expected workforce demand for different organizations at a company, the ability to train a machine learning model to intelligently forecast hierarchical data is essential.
This business pattern is common across a wide variety of industries and applicable to many real-world use cases. Below are some examples of where the hierarchical time series pattern is useful.
| Industry | Scenario |
|----------------|--------------------------------------------|
| *Restaurant Chain* | Building demand forecasting models across thousands of restaurants and several countries. |
| *Retail Organization* | Building workforce optimization models for thousands of stores. |
| *Retail Organization*| Price optimization models for hundreds of thousands of products available. |
### Technical Summary
A hierarchical time series is a structure in which the unique series are arranged into a hierarchy based on dimensions such as geography or product type. The table below shows an example of data whose unique attributes form a hierarchy. Our hierarchy is defined by the `product type` such as headphones or tablets, the `product category`, which splits product types into accessories and devices, and the `region` the products are sold in. The table below shows the first entry of each unique series in the hierarchy.
![data-table](./media/data-table.png)
To further visualize this, the leaf level of the hierarchy contains all the time series with unique combinations of attribute values. Each higher level in the hierarchy considers one less dimension for defining the time series and aggregates each set of `child nodes` from the lower level into a `parent node`.
![hierachy-sample](./media/hierarchy-sample-ms.PNG)
> **Note:** If no unique root level exists in the data, Automated Machine Learning will create a node `automl_top_level` for users to train or forecasts totals.
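As a toy illustration of this aggregation (a sketch only; the column and value names below are invented to mirror the example above and are not part of the accelerator), each parent-level series can be obtained by dropping the lowest remaining dimension and summing the child series:

```python
import pandas as pd

# Invented toy data mirroring the hierarchy: region > product_category > product_type (leaf level).
df = pd.DataFrame(
    {
        "date": pd.to_datetime(["2020-01-01"] * 4),
        "region": ["East", "East", "West", "West"],
        "product_category": ["Accessories", "Devices", "Accessories", "Devices"],
        "product_type": ["headphones", "tablets", "headphones", "tablets"],
        "quantity": [10, 4, 7, 3],
    }
)

# Aggregate child nodes into their parents by dropping the leaf dimension and summing.
category_level = df.groupby(["date", "region", "product_category"], as_index=False)["quantity"].sum()
region_level = df.groupby(["date", "region"], as_index=False)["quantity"].sum()
total_level = df.groupby(["date"], as_index=False)["quantity"].sum()  # the root, "automl_top_level"
print(region_level)
```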
## Prerequisites
To use this solution accelerator, all you need is access to an [Azure subscription](https://azure.microsoft.com/free/) and an [Azure Machine Learning Workspace](https://docs.microsoft.com/azure/machine-learning/how-to-manage-workspace) that you'll create below.
A basic understanding of Azure Machine Learning and hierarchical time series concepts will be helpful for understanding the solution. The following resources can help introduce you to these concepts:
1. [Azure Machine Learning Overview](https://azure.microsoft.com/services/machine-learning/)
2. [Azure Machine Learning Tutorials](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup)
3. [Azure Machine Learning Sample Notebooks on Github](https://github.com/Azure/azureml-examples/)
4. [Forecasting: Principles and Practice, Hierarchical time series](https://otexts.com/fpp2/hts.html)
## Getting started
### 1. Set up the Compute Instance
Please create a [Compute Instance](https://docs.microsoft.com/en-us/azure/machine-learning/concept-compute-instance#create) and clone the git repo to your workspace.
### 2. Run the Notebook
Once your environment is set up, go to JupyterLab and run the notebook auto-ml-forecasting-hierarchical-timeseries.ipynb on the Compute Instance you created. It runs through the steps outlined sequentially. By the end, you'll know how to train, score, and make predictions using the hierarchical time series model pattern on Azure Machine Learning.
| Notebook | Description |
|----------------|--------------------------------------------|
| `auto-ml-forecasting-hierarchical-timeseries.ipynb`|Creates a pipeline to train machine learning models for the defined hierarchy and forecast at the desired hierarchy level using Automated ML. |
![Work Flow](./media/workflow.PNG)
## Key Concepts
### Automated Machine Learning
[Automated Machine Learning](https://docs.microsoft.com/azure/machine-learning/concept-automated-ml), also referred to as automated ML or AutoML, is the process of automating the time-consuming, iterative tasks of machine learning model development. It allows data scientists, analysts, and developers to build ML models with high scale, efficiency, and productivity, all while sustaining model quality.
### Pipelines
[Pipelines](https://docs.microsoft.com/azure/machine-learning/concept-ml-pipelines) allow you to create workflows in your machine learning projects. These workflows have a number of benefits including speed, simplicity, repeatability, and modularity.
### ParallelRunStep
[ParallelRunStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.parallel_run_step.parallelrunstep?view=azure-ml-py) enables the parallel training of models and is commonly used for batch inferencing. This [document](https://docs.microsoft.com/azure/machine-learning/how-to-use-parallel-run-step) walks through some of the key concepts around ParallelRunStep.
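Below is a minimal, hedged sketch of how a ParallelRunStep could be wired into a pipeline. The entry script name, cluster name, and curated environment are placeholders, and `ws`, `dstore`, `registered_train`, and `experiment` are assumed to be defined as in the accompanying notebook; the accelerator's notebook builds the actual training step for you.

```python
from azureml.core import Environment
from azureml.pipeline.core import Pipeline, PipelineData
from azureml.pipeline.steps import ParallelRunConfig, ParallelRunStep

# Placeholder configuration: "train_script.py", "cpu-cluster" and the curated
# environment name are illustrative, not the accelerator's actual values.
parallel_run_config = ParallelRunConfig(
    source_directory="scripts",
    entry_script="train_script.py",   # invoked once per mini-batch, e.g. per hierarchy node
    error_threshold=-1,               # -1 means do not fail the step on per-item errors
    output_action="append_row",
    environment=Environment.get(ws, "AzureML-AutoML"),
    compute_target=ws.compute_targets["cpu-cluster"],
    node_count=2,
)

parallel_train_step = ParallelRunStep(
    name="parallel-training",
    parallel_run_config=parallel_run_config,
    inputs=[registered_train.as_named_input("train_data")],
    output=PipelineData("training_results", datastore=dstore),
    allow_reuse=False,
)

pipeline = Pipeline(workspace=ws, steps=[parallel_train_step])
# run = experiment.submit(pipeline)
```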
### Other Concepts
In addition to ParallelRunStep, Pipelines, and Automated Machine Learning, you'll also be working with the following concepts: [workspace](https://docs.microsoft.com/azure/machine-learning/concept-workspace), [datasets](https://docs.microsoft.com/azure/machine-learning/concept-data#datasets), [compute targets](https://docs.microsoft.com/azure/machine-learning/concept-compute-target#train), [python script steps](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), and [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/).
## Contributing
This project welcomes contributions and suggestions. To learn more visit the [contributing](CONTRIBUTING.md) section.
Most contributions require you to agree to a Contributor License Agreement (CLA)
declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

View File

@@ -0,0 +1,639 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Hierarchical Time Series - Automated ML\n",
"**_Generate hierarchical time series forecasts with Automated Machine Learning_**\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For this notebook we are using a synthetic dataset portraying sales data to predict the the quantity of a vartiety of product skus across several states, stores, and product categories.\n",
"\n",
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1.0 Set up workspace, datastore, experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003526897
}
},
"outputs": [],
"source": [
"import azureml.core\n",
"from azureml.core import Workspace, Datastore\n",
"import pandas as pd\n",
"\n",
"# Set up your workspace\n",
"ws = Workspace.from_config()\n",
"ws.get_details()\n",
"\n",
"# Set up your datastores\n",
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose an experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003540729
}
},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, \"automl-hts\")\n",
"\n",
"print(\"Experiment name: \" + experiment.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2.0 Data\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"nteract": {
"transient": {
"deleting": false
}
}
},
"source": [
"### Upload local csv files to datastore\n",
"You can upload your train and inference csv files to the default datastore in your workspace. \n",
"\n",
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore.datastore?view=azure-ml-py) documentation on how to access data from Datastore."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"datastore_path = \"hts-sample\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"datastore = ws.get_default_datastore()\n",
"datastore"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the TabularDatasets \n",
"\n",
"Datasets in Azure Machine Learning are references to specific data in a Datastore. The data can be retrieved as a [TabularDatasets](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py). We will read in the data as a pandas DataFrame, upload to the data store and register them to your Workspace using ```register_pandas_dataframe``` so they can be called as an input into the training pipeline. We will use the inference dataset as part of the forecasting pipeline. The step need only be completed once."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007017296
}
},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"registered_train = TabularDatasetFactory.register_pandas_dataframe(\n",
" pd.read_csv(\"Data/hts-sample-train.csv\"),\n",
" target=(datastore, \"hts-sample\"),\n",
" name=\"hts-sales-train\",\n",
")\n",
"registered_inference = TabularDatasetFactory.register_pandas_dataframe(\n",
" pd.read_csv(\"Data/hts-sample-test.csv\"),\n",
" target=(datastore, \"hts-sample\"),\n",
" name=\"hts-sales-test\",\n",
")"
]
},
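{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an optional sanity check, the next cell previews a few rows of the registered training dataset. This is only a minimal sketch; it assumes the `registered_train` dataset created in the previous cell."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check: preview a few rows of the registered training dataset.\n",
"# Assumes `registered_train` was created in the cell above.\n",
"registered_train.take(5).to_pandas_dataframe()"
]
},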
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3.0 Build the training pipeline\n",
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose a compute target\n",
"\n",
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
"\n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007037308
}
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"\n",
"# Name your cluster\n",
"compute_name = \"hts-compute\"\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print(\"Found compute target: \" + compute_name)\n",
"else:\n",
" print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n",
" )\n",
" # Create the compute target\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
" # If no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
" )\n",
"\n",
" # For a more detailed view of current cluster status, use the 'status' property\n",
" print(compute_target.status.serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up training parameters\n",
"\n",
"This dictionary defines the AutoML and hierarchy settings. For this forecasting task we need to define several settings inncluding the name of the time column, the maximum forecast horizon, the hierarchy definition, and the level of the hierarchy at which to train.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **hierarchy_column_names** | The names of columns that define the hierarchical structure of the data from highest level to most granular. |\n",
"| **training_level** | The level of the hierarchy to be used for training models. |\n",
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
"| **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
"| **model_explainability** | Flag to disable explaining the best automated ML model at the end of all training iterations. The default is True and will block non-explainable models which may impact the forecast accuracy. For more information, see [Interpretability: model explanations in automated machine learning](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-machine-learning-interpretability-automl). |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007061544
}
},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._hts.hts_parameters import HTSTrainParameters\n",
"\n",
"model_explainability = True\n",
"\n",
"engineered_explanations = False\n",
"# Define your hierarchy. Adjust the settings below based on your dataset.\n",
"hierarchy = [\"state\", \"store_id\", \"product_category\", \"SKU\"]\n",
"training_level = \"SKU\"\n",
"\n",
"# Set your forecast parameters. Adjust the settings below based on your dataset.\n",
"time_column_name = \"date\"\n",
"label_column_name = \"quantity\"\n",
"forecast_horizon = 7\n",
"\n",
"\n",
"automl_settings = {\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"label_column_name\": label_column_name,\n",
" \"time_column_name\": time_column_name,\n",
" \"forecast_horizon\": forecast_horizon,\n",
" \"hierarchy_column_names\": hierarchy,\n",
" \"hierarchy_training_level\": training_level,\n",
" \"track_child_runs\": False,\n",
" \"pipeline_fetch_max_batch_size\": 15,\n",
" \"model_explainability\": model_explainability,\n",
" # The following settings are specific to this sample and should be adjusted according to your own needs.\n",
" \"iteration_timeout_minutes\": 10,\n",
" \"iterations\": 10,\n",
" \"n_cross_validations\": 2,\n",
"}\n",
"\n",
"hts_parameters = HTSTrainParameters(\n",
" automl_settings=automl_settings,\n",
" hierarchy_column_names=hierarchy,\n",
" training_level=training_level,\n",
" enable_engineered_explanations=engineered_explanations,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up hierarchy training pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Parallel run step is leveraged to train the hierarchy. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The `process_count_per_node` is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
"\n",
"* **experiment:** The experiment used for training.\n",
"* **train_data:** The tabular dataset to be used as input to the training run.\n",
"* **node_count:** The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long.\n",
"* **process_count_per_node:** Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance.\n",
"* **train_pipeline_parameters:** The set of configuration parameters defined in the previous section. \n",
"\n",
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"\n",
"\n",
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
" experiment=experiment,\n",
" train_data=registered_train,\n",
" compute_target=compute_target,\n",
" node_count=2,\n",
" process_count_per_node=8,\n",
" train_pipeline_parameters=hts_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit the pipeline to run\n",
"Next we submit our pipeline to run. The whole training pipeline takes about 1h using a Standard_D16_V3 VM with our current ParallelRunConfig setting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run = experiment.submit(training_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check the run status, if training_run is in completed state, continue to forecasting. If training_run is in another state, check the portal for failures."
]
},
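{
"cell_type": "markdown",
"metadata": {},
"source": [
"The status can also be checked programmatically. The optional sketch below simply prints the current state of `training_run`; it assumes the pipeline run object from the cells above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: check the pipeline run status programmatically.\n",
"# Assumes `training_run` from the cells above; it should report 'Completed' before moving on to forecasting.\n",
"print(\"Training run status: \" + training_run.get_status())"
]
},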
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### [Optional] Get the explanations\n",
"First we need to download the explanations to the local disk."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if model_explainability:\n",
" expl_output = training_run.get_pipeline_output(\"explanations\")\n",
" expl_output.download(\"training_explanations\")\n",
"else:\n",
" print(\n",
" \"Model explanations are available only if model_explainability is set to True.\"\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The explanations are downloaded to the \"training_explanations/azureml\" directory."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"if model_explainability:\n",
" explanations_dirrectory = os.listdir(\n",
" os.path.join(\"training_explanations\", \"azureml\")\n",
" )\n",
" if len(explanations_dirrectory) > 1:\n",
" print(\n",
" \"Warning! The directory contains multiple explanations, only the first one will be displayed.\"\n",
" )\n",
" print(\"The explanations are located at {}.\".format(explanations_dirrectory[0]))\n",
" # Now we will list all the explanations.\n",
" explanation_path = os.path.join(\n",
" \"training_explanations\",\n",
" \"azureml\",\n",
" explanations_dirrectory[0],\n",
" \"training_explanations\",\n",
" )\n",
" print(\"Available explanations\")\n",
" print(\"==============================\")\n",
" print(\"\\n\".join(os.listdir(explanation_path)))\n",
"else:\n",
" print(\n",
" \"Model explanations are available only if model_explainability is set to True.\"\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"View the explanations on \"state\" level."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import display\n",
"\n",
"explanation_type = \"raw\"\n",
"level = \"state\"\n",
"\n",
"if model_explainability:\n",
" display(\n",
" pd.read_csv(\n",
" os.path.join(explanation_path, \"{}_explanations_{}.csv\").format(\n",
" explanation_type, level\n",
" )\n",
" )\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5.0 Forecasting\n",
"For hierarchical forecasting we need to provide the HTSInferenceParameters object.\n",
"#### HTSInferenceParameters arguments\n",
"* **hierarchy_forecast_level:** The default level of the hierarchy to produce prediction/forecast on.\n",
"* **allocation_method:** \\[Optional] The disaggregation method to use if the hierarchy forecast level specified is below the define hierarchy training level. <br><i>(average historical proportions) 'average_historical_proportions'</i><br><i>(proportions of the historical averages) 'proportions_of_historical_average'</i>\n",
"\n",
"#### get_many_models_batch_inference_steps arguments\n",
"* **experiment:** The experiment used for inference run.\n",
"* **inference_data:** The data to use for inferencing. It should be the same schema as used for training.\n",
"* **compute_target:** The compute target that runs the inference pipeline.\n",
"* **node_count:** The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku).\n",
"* **process_count_per_node:** The number of processes per node.\n",
"* **train_run_id:** \\[Optional] The run id of the hierarchy training, by default it is the latest successful training hts run in the experiment.\n",
"* **train_experiment_name:** \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline.\n",
"* **process_count_per_node:** \\[Optional] The number of processes per node, by default it's 4."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._hts.hts_parameters import HTSInferenceParameters\n",
"\n",
"inference_parameters = HTSInferenceParameters(\n",
" hierarchy_forecast_level=\"store_id\", # The setting is specific to this dataset and should be changed based on your dataset.\n",
" allocation_method=\"proportions_of_historical_average\",\n",
")\n",
"\n",
"steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
" experiment=experiment,\n",
" inference_data=registered_inference,\n",
" compute_target=compute_target,\n",
" inference_pipeline_parameters=inference_parameters,\n",
" node_count=2,\n",
" process_count_per_node=8,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"inference_pipeline = Pipeline(ws, steps=steps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline)\n",
"inference_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieve results\n",
"\n",
"Forecast results can be retrieved through the following code. The prediction results summary and the actual predictions are downloaded in forecast_results folder"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"forecasts = inference_run.get_pipeline_output(\"forecasts\")\n",
"forecasts.download(\"forecast_results\")"
]
},
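{
"cell_type": "markdown",
"metadata": {},
"source": [
"The optional sketch below lists whatever files were downloaded to the forecast_results folder; the exact file names depend on the run, so none are assumed here. Individual CSV result files can then be loaded with pandas."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"# Optional: list the files downloaded to the forecast_results folder.\n",
"# Assumes the download in the previous cell completed successfully.\n",
"for root, _, files in os.walk(\"forecast_results\"):\n",
"    for name in files:\n",
"        print(os.path.join(root, name))"
]
},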
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Resbumit the Pipeline\n",
"\n",
"The inference pipeline can be submitted with different configurations."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(\n",
" inference_pipeline, pipeline_parameters={\"hierarchy_forecast_level\": \"state\"}\n",
")\n",
"inference_run.wait_for_completion(show_output=False)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"categories": [
"how-to-use-azureml",
"automated-machine-learning"
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-hierarchical-timeseries
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,3 @@
dependencies:
- pip:
- azureml-contrib-automl-pipeline-steps

View File

@@ -0,0 +1,122 @@
---
page_type: sample
languages:
- python
products:
- azure-machine-learning
description: Tutorial showing how to solve complex machine learning time series forecasting problems at scale by using Azure Automated ML and the Many Models solution accelerator.
---
![Many Models Solution Accelerator Banner](images/mmsa.png)
# Many Models Solution Accelerator
<!--
Guidelines on README format: https://review.docs.microsoft.com/help/onboard/admin/samples/concepts/readme-template?branch=master
Guidance on onboarding samples to docs.microsoft.com/samples: https://review.docs.microsoft.com/help/onboard/admin/samples/process/onboarding?branch=master
Taxonomies for products and languages: https://review.docs.microsoft.com/new-hope/information-architecture/metadata/taxonomies?branch=master
-->
In the real world, many problems can be too complex to be solved by a single machine learning model. Whether that means predicting sales for each individual store, building a predictive maintenance model for hundreds of oil wells, or tailoring an experience to individual users, building a model for each instance can lead to improved results on many machine learning problems.
This pattern is very common across a wide variety of industries and applicable to many real-world use cases. Below are some examples where this pattern is being used.
- Energy and utility companies building predictive maintenance models for thousands of oil wells, hundreds of wind turbines, or hundreds of smart meters
- Retail organizations building workforce optimization models for thousands of stores, campaign promotion propensity models, and price optimization models for hundreds of thousands of products they sell
- Restaurant chains building demand forecasting models across thousands of restaurants
- Banks and financial institutions building models for cash replenishment for ATM machines (and for groups of several ATMs), or building personalized models for individuals
- Enterprises building revenue forecasting models at each division level
- Document management companies building text analytics and legal document search models for each state
Azure Machine Learning (AML) makes it easy to train, operate, and manage hundreds or even thousands of models. This repo will walk you through the end-to-end process of creating a many models solution from training to scoring to monitoring.
## Prerequisites
To use this solution accelerator, all you need is access to an [Azure subscription](https://azure.microsoft.com/free/) and an [Azure Machine Learning Workspace](https://docs.microsoft.com/azure/machine-learning/how-to-manage-workspace) that you'll create below.
While it's not required, a basic understanding of Azure Machine Learning will be helpful for understanding the solution. The following resources can help introduce you to AML:
1. [Azure Machine Learning Overview](https://azure.microsoft.com/services/machine-learning/)
2. [Azure Machine Learning Tutorials](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup)
3. [Azure Machine Learning Sample Notebooks on Github](https://github.com/Azure/azureml-examples)
## Getting started
### 1. Deploy Resources
Start by deploying the resources to Azure. The button below will deploy Azure Machine Learning and its related resources:
<a href="https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2Fmicrosoft%2Fsolution-accelerator-many-models%2Fmaster%2Fazuredeploy.json" target="_blank">
<img src="http://azuredeploy.net/deploybutton.png"/>
</a>
### 2. Configure Development Environment
Next you'll need to configure your [development environment](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment) for Azure Machine Learning. We recommend using a [Compute Instance](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment#compute-instance) as it's the fastest way to get up and running.
### 3. Run Notebooks
Once your development environment is set up, run through the Jupyter Notebooks sequentially following the steps outlined. By the end, you'll know how to train, score, and make predictions using the many models pattern on Azure Machine Learning.
![Sequence of Notebooks](./images/mmsa-overview.png)
## Contents
In this repo, you'll train and score a forecasting model for each orange juice brand and for each store at a (simulated) grocery chain. By the end, you'll have forecasted sales by using up to 11,973 models to predict sales for the next few weeks.
The data used in this sample is simulated based on the [Dominick's Orange Juice Dataset](http://www.cs.unitn.it/~taufer/QMMA/L10-OJ-Data.html#(1)), sales data from a Chicago area grocery store.
<img src="images/Flow_map.png" width="1000">
### Using Automated ML to train the models:
The [`auto-ml-forecasting-many-models.ipynb`](./auto-ml-forecasting-many-models.ipynb) notebook is a guided solution accelerator that demonstrates the steps from data preparation to model training and forecasting with the trained models, as well as operationalizing the solution.
## How-to-videos
Watch these how-to videos for a step-by-step walk-through of the many models solution accelerator to learn how to set up your models using Automated ML.
### Automated ML
[![Watch the video](https://media.giphy.com/media/dWUKfameudyNGRnp1t/giphy.gif)](https://channel9.msdn.com/Shows/Docs-AI/Building-Large-Scale-Machine-Learning-Forecasting-Models-using-Azure-Machine-Learnings-Automated-ML)
## Key concepts
### ParallelRunStep
[ParallelRunStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.parallel_run_step.parallelrunstep?view=azure-ml-py) enables the parallel training of models and is commonly used for batch inferencing. This [document](https://docs.microsoft.com/azure/machine-learning/how-to-use-parallel-run-step) walks through some of the key concepts around ParallelRunStep.
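As a rough illustration only, the sketch below shows how a ParallelRunConfig and ParallelRunStep typically fit together; it is not the accelerator's actual configuration, and `env`, `compute_target`, `partitioned_dataset`, `output_dir`, and the `train.py` entry script are placeholders you would define in your own pipeline.

```python
from azureml.pipeline.steps import ParallelRunConfig, ParallelRunStep

# Minimal sketch only: `env`, `compute_target`, `partitioned_dataset`, and `output_dir`
# are placeholders assumed to be defined elsewhere in the pipeline.
parallel_run_config = ParallelRunConfig(
    source_directory="scripts",      # folder containing the entry script
    entry_script="train.py",         # script executed once per mini-batch / data partition
    environment=env,
    compute_target=compute_target,
    node_count=3,
    process_count_per_node=8,
    error_threshold=-1,              # do not fail the step on individual item errors
    output_action="append_row",      # collect per-batch results into a single output file
)

train_step = ParallelRunStep(
    name="many-models-train",
    parallel_run_config=parallel_run_config,
    inputs=[partitioned_dataset.as_named_input("train_data")],
    output=output_dir,
    allow_reuse=False,
)
```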
### Pipelines
[Pipelines](https://docs.microsoft.com/azure/machine-learning/concept-ml-pipelines) allow you to create workflows in your machine learning projects. These workflows have a number of benefits including speed, simplicity, repeatability, and modularity.
### Automated Machine Learning
[Automated Machine Learning](https://docs.microsoft.com/azure/machine-learning/concept-automated-ml), also referred to as automated ML or AutoML, is the process of automating the time-consuming, iterative tasks of machine learning model development. It allows data scientists, analysts, and developers to build ML models at high scale, efficiency, and productivity, all while sustaining model quality.
### Other Concepts
In addition to ParallelRunStep, Pipelines, and Automated Machine Learning, you'll also be working with the following concepts: [workspace](https://docs.microsoft.com/azure/machine-learning/concept-workspace), [datasets](https://docs.microsoft.com/azure/machine-learning/concept-data#datasets), [compute targets](https://docs.microsoft.com/azure/machine-learning/concept-compute-target#train), [python script steps](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), and [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/).
## Contributing
This project welcomes contributions and suggestions. To learn more visit the [contributing](../../../CONTRIBUTING.md) section.
Most contributions require you to agree to a Contributor License Agreement (CLA)
declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

View File

@@ -0,0 +1,746 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Many Models - Automated ML\n",
"**_Generate many models time series forecasts with Automated Machine Learning_**\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For this notebook we are using a synthetic dataset portraying sales data to predict the the quantity of a vartiety of product skus across several states, stores, and product categories.\n",
"\n",
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1.0 Set up workspace, datastore, experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003526897
}
},
"outputs": [],
"source": [
"import azureml.core\n",
"from azureml.core import Workspace, Datastore\n",
"import pandas as pd\n",
"\n",
"# Set up your workspace\n",
"ws = Workspace.from_config()\n",
"ws.get_details()\n",
"\n",
"# Set up your datastores\n",
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose an experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003540729
}
},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, \"automl-many-models\")\n",
"\n",
"print(\"Experiment name: \" + experiment.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2.0 Data\n",
"\n",
"This notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. \n",
"\n",
"The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each thus allowing 11,973 models to be trained to showcase the power of the many models pattern.\n",
"\n",
" \n",
"In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:\n",
"\n",
"1. Registering the blob container as a Datastore to the Workspace\n",
"2. Registering a tabular dataset to the Workspace"
]
},
{
"cell_type": "markdown",
"metadata": {
"nteract": {
"transient": {
"deleting": false
}
}
},
"source": [
"### 2.1 Data Preparation\n",
"The OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on time column ('WeekStarting') before and after '1992-5-28' .\n",
"\n",
"The container has\n",
"<ol>\n",
" <li><b>'oj-data-tabular'</b> and <b>'oj-inference-tabular'</b> folders that contains training and inference data respectively for the 11,973 models. </li>\n",
" <li>It also has <b>'oj-data-small-tabular'</b> and <b>'oj-inference-small-tabular'</b> folders that has training and inference data for 10 models.</li>\n",
"</ol>\n",
"\n",
"To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace."
]
},
{
"cell_type": "markdown",
"metadata": {
"nteract": {
"transient": {
"deleting": false
}
}
},
"source": [
"<b> To use your own data, put your own data in a blobstore folder. As shown it can be one file or multiple files. We can then register datastore using that blob as shown below.\n",
" \n",
"<h3> How sample data in blob store looks like</h3>\n",
"\n",
"['oj-data-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)</b>\n",
"![image-4.png](mm-1.png)\n",
"\n",
"['oj-inference-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
"![image-3.png](mm-2.png)\n",
"\n",
"['oj-data-small-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
"\n",
"![image-5.png](mm-3.png)\n",
"\n",
"['oj-inference-small-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
"![image-6.png](mm-4.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 2.2 Register the blob container as DataStore\n",
"\n",
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
"\n",
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.\n",
"\n",
"In this next step, we will be registering blob storage as datastore to the Workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Datastore\n",
"\n",
"# Please change the following to point to your own blob container and pass in account_key\n",
"blob_datastore_name = \"automl_many_models\"\n",
"container_name = \"automl-sample-notebook-data\"\n",
"account_name = \"automlsamplenotebookdata\"\n",
"\n",
"oj_datastore = Datastore.register_azure_blob_container(\n",
" workspace=ws,\n",
" datastore_name=blob_datastore_name,\n",
" container_name=container_name,\n",
" account_name=account_name,\n",
" create_if_not_exists=True,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 2.3 Using tabular datasets \n",
"\n",
"Now that the datastore is available from the Workspace, [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset, so that users who have their data which can be in one or many files (*.parquet or *.csv) and have not split up data according to group columns needed for training, can do so using out of box support for 'partiion_by' feature of TabularDataset shown in section 5.0 below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007017296
}
},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"ds_name_small = \"oj-data-small-tabular\"\n",
"input_ds_small = Dataset.Tabular.from_delimited_files(\n",
" path=oj_datastore.path(ds_name_small + \"/\"), validate=False\n",
")\n",
"\n",
"inference_name_small = \"oj-inference-small-tabular\"\n",
"inference_ds_small = Dataset.Tabular.from_delimited_files(\n",
" path=oj_datastore.path(inference_name_small + \"/\"), validate=False\n",
")"
]
},
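{
"cell_type": "markdown",
"metadata": {},
"source": [
"If your own data is not yet split by the grouping columns, the `partition_by` feature of TabularDataset mentioned above can write a repartitioned copy back to a datastore. The commented-out sketch below is optional and illustrative only; the target path and dataset name are placeholders, and this step is not needed for the pre-partitioned sample data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional, illustrative only: repartition your own unpartitioned TabularDataset by the\n",
"# group columns. The target path and dataset name below are placeholders; the OJ sample\n",
"# data is already organized by Store and Brand, so this is left commented out.\n",
"# from azureml.data.datapath import DataPath\n",
"#\n",
"# partitioned_ds = input_ds_small.partition_by(\n",
"#     partition_keys=[\"Store\", \"Brand\"],\n",
"#     target=DataPath(oj_datastore, \"oj-partitioned\"),\n",
"#     name=\"oj_data_partitioned\",\n",
"# )"
]
},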
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3.0 Build the training pipeline\n",
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose a compute target\n",
"\n",
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
"\n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007037308
}
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"\n",
"# Name your cluster\n",
"compute_name = \"mm-compute\"\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print(\"Found compute target: \" + compute_name)\n",
"else:\n",
" print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n",
" )\n",
" # Create the compute target\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
" # If no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
" )\n",
"\n",
" # For a more detailed view of current cluster status, use the 'status' property\n",
" print(compute_target.status.serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up training parameters\n",
"\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings inncluding the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
"| **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007061544
}
},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsTrainParameters,\n",
")\n",
"\n",
"partition_column_names = [\"Store\", \"Brand\"]\n",
"automl_settings = {\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
" \"iterations\": 15,\n",
" \"experiment_timeout_hours\": 0.25,\n",
" \"label_column_name\": \"Quantity\",\n",
" \"n_cross_validations\": 3,\n",
" \"time_column_name\": \"WeekStarting\",\n",
" \"drop_column_names\": \"Revenue\",\n",
" \"max_horizon\": 6,\n",
" \"grain_column_names\": partition_column_names,\n",
" \"track_child_runs\": False,\n",
"}\n",
"\n",
"mm_paramters = ManyModelsTrainParameters(\n",
" automl_settings=automl_settings, partition_column_names=partition_column_names\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up many models pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for training. |\n",
"| **train_data** | The file dataset to be used as input to the training run. |\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. |\n",
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
"\n",
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"\n",
"\n",
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
" experiment=experiment,\n",
" train_data=input_ds_small,\n",
" compute_target=compute_target,\n",
" node_count=2,\n",
" process_count_per_node=8,\n",
" run_invocation_timeout=920,\n",
" train_pipeline_parameters=mm_paramters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit the pipeline to run\n",
"Next we submit our pipeline to run. The whole training pipeline takes about 40m using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run = experiment.submit(training_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check the run status, if training_run is in completed state, continue to forecasting. If training_run is in another state, check the portal for failures."
]
},
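{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally, the run can also be monitored from inside the notebook with the RunDetails widget. The sketch below assumes the `azureml-widgets` package is installed in this kernel."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: monitor the pipeline run from the notebook.\n",
"# Assumes the azureml-widgets package is installed in this kernel.\n",
"from azureml.widgets import RunDetails\n",
"\n",
"RunDetails(training_run).show()"
]
},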
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5.0 Publish and schedule the train pipeline (Optional)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 5.1 Publish the pipeline\n",
"\n",
"Once you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',\n",
"# description = 'train many models',\n",
"# version = '1',\n",
"# continue_on_step_failure = False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.2 Schedule the pipeline\n",
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
"\n",
"# training_pipeline_id = published_pipeline.id\n",
"\n",
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
"# recurring_schedule = Schedule.create(ws, name=\"automl_training_recurring_schedule\",\n",
"# description=\"Schedule Training Pipeline to run on the first day of every month\",\n",
"# pipeline_id=training_pipeline_id,\n",
"# experiment_name=experiment.name,\n",
"# recurrence=recurrence)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 6.0 Forecasting"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up output dataset for inference data\n",
"Output of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data import OutputFileDatasetConfig\n",
"\n",
"output_inference_data_ds = OutputFileDatasetConfig(\n",
" name=\"many_models_inference_output\", destination=(dstore, \"oj/inference_data/\")\n",
").register_on_complete(name=\"oj_inference_data_ds\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For many models we need to provide the ManyModelsInferenceParameters object.\n",
"\n",
"#### ManyModelsInferenceParameters arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **partition_column_names** | List of column names that identifies groups. |\n",
"| **target_column_name** | \\[Optional] Column name only if the inference dataset has the target. |\n",
"| **time_column_name** | \\[Optional] Column name only if it is timeseries. |\n",
"| **many_models_run_id** | \\[Optional] Many models run id where models were trained. |\n",
"\n",
"#### get_many_models_batch_inference_steps arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for inference run. |\n",
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
"| **compute_target** The compute target that runs the inference pipeline.|\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
"| **process_count_per_node** The number of processes per node.\n",
"| **train_run_id** | \\[Optional] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. |\n",
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
"| **process_count_per_node** | \\[Optional] The number of processes per node, by default it's 4. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsInferenceParameters,\n",
")\n",
"\n",
"mm_parameters = ManyModelsInferenceParameters(\n",
" partition_column_names=[\"Store\", \"Brand\"],\n",
" time_column_name=\"WeekStarting\",\n",
" target_column_name=\"Quantity\",\n",
")\n",
"\n",
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
" experiment=experiment,\n",
" inference_data=inference_ds_small,\n",
" node_count=2,\n",
" process_count_per_node=8,\n",
" compute_target=compute_target,\n",
" run_invocation_timeout=300,\n",
" output_datastore=output_inference_data_ds,\n",
" train_run_id=training_run.id,\n",
" train_experiment_name=training_run.experiment.name,\n",
" inference_pipeline_parameters=mm_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"inference_pipeline = Pipeline(ws, steps=inference_steps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline)\n",
"inference_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieve results\n",
"\n",
"The forecasting pipeline forecasts the orange juice quantity for a Store by Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container is listed in 'forecasting_output.txt' under Outputs+logs. \n",
"\n",
"The following code snippet:\n",
"1. Downloads the contents of the output folder that is passed in the parallel run step \n",
"2. Reads the parallel_run_step.txt file that has the predictions as pandas dataframe and \n",
"3. Displays the top 10 rows of the predictions"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
"\n",
"forecasting_results_name = \"forecasting_results\"\n",
"forecasting_output_name = \"many_models_inference_output\"\n",
"forecast_file = get_output_from_mm_pipeline(\n",
" inference_run, forecasting_results_name, forecasting_output_name\n",
")\n",
"df = pd.read_csv(forecast_file, delimiter=\" \", header=None)\n",
"df.columns = [\n",
" \"Week Starting\",\n",
" \"Store\",\n",
" \"Brand\",\n",
" \"Quantity\",\n",
" \"Advert\",\n",
" \"Price\",\n",
" \"Revenue\",\n",
" \"Predicted\",\n",
"]\n",
"print(\n",
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
")\n",
"df.head(10)"
]
},
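{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an optional sanity check, the sketch below computes simple aggregate error metrics from the retrieved predictions. It assumes the actual `Quantity` values are present alongside the `Predicted` column, as they are in this sample's inference data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check: simple aggregate error metrics on the retrieved predictions.\n",
"# Assumes the actual 'Quantity' values are present, as in this sample's inference data.\n",
"abs_err = (df[\"Quantity\"] - df[\"Predicted\"]).abs()\n",
"print(\"MAE : {:.2f}\".format(abs_err.mean()))\n",
"print(\"MAPE: {:.2f}%\".format((abs_err / df[\"Quantity\"].abs()).mean() * 100))"
]
},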
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 7.0 Publish and schedule the inference pipeline (Optional)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.1 Publish the pipeline\n",
"\n",
"Once you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',\n",
"# description = 'forecast many models',\n",
"# version = '1',\n",
"# continue_on_step_failure = False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.2 Schedule the pipeline\n",
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
"\n",
"# forecasting_pipeline_id = published_pipeline.id\n",
"\n",
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
"# recurring_schedule = Schedule.create(ws, name=\"automl_forecasting_recurring_schedule\",\n",
"# description=\"Schedule Forecasting Pipeline to run on the first day of every week\",\n",
"# pipeline_id=forecasting_pipeline_id,\n",
"# experiment_name=experiment.name,\n",
"# recurrence=recurrence)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"categories": [
"how-to-use-azureml",
"automated-machine-learning"
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-many-models
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,3 @@
dependencies:
- pip:
- azureml-contrib-automl-pipeline-steps

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-orange-juice-sales
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,61 @@
"""
This is the script that is executed on the compute instance. It relies
on the model.pkl file which is uploaded along with this script to the
compute instance.
"""
import argparse
from azureml.core import Dataset, Run
from sklearn.externals import joblib
from pandas.tseries.frequencies import to_offset
parser = argparse.ArgumentParser()
parser.add_argument(
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)
args = parser.parse_args()
target_column_name = args.target_column_name
test_dataset_id = args.test_dataset
run = Run.get_context()
ws = run.experiment.workspace
# get the input dataset by id
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
y_test = X_test.pop(target_column_name).values
# generate forecast
fitted_model = joblib.load("model.pkl")
# Default quantile values: the 2.5th and 97.5th percentiles (a 95% prediction interval) plus the median
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
file_name = "outputs/predictions.csv"
clean.to_csv(file_name, header=True, index=False)  # write the predictions without the index
# Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name)

View File

@@ -0,0 +1,49 @@
import os
import shutil
from azureml.core import ScriptRunConfig
def run_remote_inference(
test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
# Create local directory to copy the model.pkl and forecasting_script.py files into.
# These files will be uploaded to and executed on the compute instance.
os.makedirs(inference_folder, exist_ok=True)
shutil.copy("forecasting_script.py", inference_folder)
train_run.download_file(
"outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
)
inference_env = train_run.get_environment()
config = ScriptRunConfig(
source_directory=inference_folder,
script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name,
"--test_dataset",
test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target,
environment=inference_env,
)
run = test_experiment.submit(
config,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags["run_algorithm"])
return run

Some files were not shown because too many files have changed in this diff.