Compare commits
192 Commits
minxia/dis
...
jeffshep/u
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
113543dfbf | ||
|
|
f0d7379af7 | ||
|
|
33ca8c7933 | ||
|
|
3fd1ce8993 | ||
|
|
aa93588190 | ||
|
|
12520400e5 | ||
|
|
35614e83fa | ||
|
|
ff22ac01cc | ||
|
|
e7dd826f34 | ||
|
|
fcc882174b | ||
|
|
6872d8a3bb | ||
|
|
a2cb4c3589 | ||
|
|
15008962b2 | ||
|
|
9414b51fac | ||
|
|
80ac414582 | ||
|
|
cbc151660b | ||
|
|
0024abc6e3 | ||
|
|
fa13385860 | ||
|
|
0c5f6daf52 | ||
|
|
c11e9fc1da | ||
|
|
280150713e | ||
|
|
bb11c80b1b | ||
|
|
d0961b98bf | ||
|
|
302589b7f9 | ||
|
|
cc85949d6d | ||
|
|
3a1824e3ad | ||
|
|
579643326d | ||
|
|
14f76f227e | ||
|
|
25baf5203a | ||
|
|
1178fcb0ba | ||
|
|
e4d84c8e45 | ||
|
|
7a3ab1e44c | ||
|
|
598a293dfa | ||
|
|
40b3068462 | ||
|
|
0ecbbbce75 | ||
|
|
9b1e130d18 | ||
|
|
0e17b33d2a | ||
|
|
34d80abd26 | ||
|
|
249278ab77 | ||
|
|
25fdb17f80 | ||
|
|
3a02a27f1e | ||
|
|
4eed9d529f | ||
|
|
f344d410a2 | ||
|
|
9dc1228063 | ||
|
|
4404e62f58 | ||
|
|
38d5743bbb | ||
|
|
0814eee151 | ||
|
|
f45b815221 | ||
|
|
bd629ae454 | ||
|
|
41de75a584 | ||
|
|
96a426dc36 | ||
|
|
824dd40f7e | ||
|
|
fa2e649fe8 | ||
|
|
e25e8e3a41 | ||
|
|
aa3670a902 | ||
|
|
ef1f9205ac | ||
|
|
3228bbfc63 | ||
|
|
f18a0dfc4d | ||
|
|
badb620261 | ||
|
|
acf46100ae | ||
|
|
cf2e3804d5 | ||
|
|
b7be42357f | ||
|
|
3ac82c07ae | ||
|
|
9743c0a1fa | ||
|
|
ba4dac530e | ||
|
|
7f7f0040fd | ||
|
|
9ca567cd9c | ||
|
|
ae7b234ba0 | ||
|
|
9788d1965f | ||
|
|
387e43a423 | ||
|
|
25f407fc81 | ||
|
|
dcb2c4638f | ||
|
|
7fb5dd3ef9 | ||
|
|
6a38f4bec3 | ||
|
|
aed078aeab | ||
|
|
f999f41ed3 | ||
|
|
07e43ee7e4 | ||
|
|
aac706c3f0 | ||
|
|
4ccb278051 | ||
|
|
64a733480b | ||
|
|
dd0976f678 | ||
|
|
15a3ca649d | ||
|
|
3c4770cfe5 | ||
|
|
8d7de05908 | ||
|
|
863faae57f | ||
|
|
8d3f5adcdb | ||
|
|
cd3394e129 | ||
|
|
ee5d0239a3 | ||
|
|
388111cedc | ||
|
|
b86191ed7f | ||
|
|
22753486de | ||
|
|
cf1d1dbf01 | ||
|
|
2e45d9800d | ||
|
|
a9a8de02ec | ||
|
|
e0c9376aab | ||
|
|
dd8339e650 | ||
|
|
1594ee64a1 | ||
|
|
83ed8222d2 | ||
|
|
b0aa91acce | ||
|
|
5928ba83bb | ||
|
|
ffa3a43979 | ||
|
|
7ce79a43f1 | ||
|
|
edcc50ab0c | ||
|
|
4a391522d0 | ||
|
|
1903f78285 | ||
|
|
a4dfcc4693 | ||
|
|
faffb3fef7 | ||
|
|
6c6227c403 | ||
|
|
e3be364e7a | ||
|
|
90e20a60e9 | ||
|
|
33a4eacf1d | ||
|
|
e30b53fddc | ||
|
|
95b0392ed2 | ||
|
|
796798cb49 | ||
|
|
08b0ba7854 | ||
|
|
ceaf82acc6 | ||
|
|
dadc93cfe5 | ||
|
|
c7076bf95c | ||
|
|
ebdffd5626 | ||
|
|
d123880562 | ||
|
|
4864e8ea60 | ||
|
|
c86db0d7fd | ||
|
|
ccfbbb3b14 | ||
|
|
c42ba64b15 | ||
|
|
6d8bf32243 | ||
|
|
9094da4085 | ||
|
|
ebf9d2855c | ||
|
|
1bbd78eb33 | ||
|
|
77f5a69e04 | ||
|
|
ce82af2ab0 | ||
|
|
2a2d2efa17 | ||
|
|
dd494e9cac | ||
|
|
352adb7487 | ||
|
|
aebe34b4e8 | ||
|
|
c7e1241e20 | ||
|
|
6529298c24 | ||
|
|
e2dddfde85 | ||
|
|
36d96f96ec | ||
|
|
7ebcfea5a3 | ||
|
|
b20bfed33a | ||
|
|
a66a92e338 | ||
|
|
c56c2c3525 | ||
|
|
4cac072fa4 | ||
|
|
aeab6b3e28 | ||
|
|
015e261f29 | ||
|
|
d2a423dde9 | ||
|
|
3ecbfd6532 | ||
|
|
02ecb2d755 | ||
|
|
122df6e846 | ||
|
|
7d6a0a2051 | ||
|
|
6cc8af80a2 | ||
|
|
f61898f718 | ||
|
|
5cb465171e | ||
|
|
0ce37dd18f | ||
|
|
d835b183a5 | ||
|
|
d3cafebff9 | ||
|
|
354b194a25 | ||
|
|
a52d67bb84 | ||
|
|
421ea3d920 | ||
|
|
24f53f1aa1 | ||
|
|
6fc5d11de2 | ||
|
|
d17547d890 | ||
|
|
928e0d4327 | ||
|
|
05327cfbb9 | ||
|
|
8f7717014b | ||
|
|
a47e50b79a | ||
|
|
8f89d88def | ||
|
|
ec97207bb1 | ||
|
|
a2d20b0f47 | ||
|
|
8180cebd75 | ||
|
|
700ab2d782 | ||
|
|
ec9a5a061d | ||
|
|
467630f955 | ||
|
|
eac6b69bae | ||
|
|
441a5b0141 | ||
|
|
70902df6da | ||
|
|
6f893ff0b4 | ||
|
|
bda592a236 | ||
|
|
8b32e8d5ad | ||
|
|
54a065c698 | ||
|
|
b9718678b3 | ||
|
|
3fa40d2c6d | ||
|
|
883e4a4c59 | ||
|
|
e90826b331 | ||
|
|
ac04172f6d | ||
|
|
8c0000beb4 | ||
|
|
35287ab0d8 | ||
|
|
3fe4f8b038 | ||
|
|
1722678469 | ||
|
|
17da7e8706 | ||
|
|
d2e7213ff3 | ||
|
|
882cb76e8a |
9
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
# Microsoft Open Source Code of Conduct
|
||||||
|
|
||||||
|
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
||||||
|
|
||||||
|
Resources:
|
||||||
|
|
||||||
|
- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
|
||||||
|
- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
|
||||||
|
- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
|
||||||
100
README.md
@@ -1,77 +1,43 @@
|
|||||||
# Azure Machine Learning service example notebooks
|
# Azure Machine Learning Python SDK notebooks
|
||||||
|
|
||||||
> a community-driven repository of examples using mlflow for tracking can be found at https://github.com/Azure/azureml-examples
|
### **With the introduction of AzureML SDK v2, this samples repository for the v1 SDK is now deprecated and will not be monitored or updated. Users are encouraged to visit the [v2 SDK samples repository](https://github.com/Azure/azureml-examples) instead for up-to-date and enhanced examples of how to build, train, and deploy machine learning models with AzureML's newest features.**
|
||||||
|
|
||||||
This repository contains example notebooks demonstrating the [Azure Machine Learning](https://azure.microsoft.com/services/machine-learning-service/) Python SDK which allows you to build, train, deploy and manage machine learning solutions using Azure. The AML SDK allows you the choice of using local or cloud compute resources, while managing and maintaining the complete data science workflow from the cloud.
|
Welcome to the Azure Machine Learning Python SDK notebooks repository!
|
||||||
|
|
||||||

|
## Getting started
|
||||||
|
|
||||||
|
These notebooks are recommended for use in an Azure Machine Learning [Compute Instance](https://docs.microsoft.com/azure/machine-learning/concept-compute-instance), where you can run them without any additional set up.
|
||||||
|
|
||||||
## Quick installation
|
However, the notebooks can be run in any development environment with the correct `azureml` packages installed.
|
||||||
```sh
|
|
||||||
pip install azureml-sdk
|
|
||||||
```
|
|
||||||
Read more detailed instructions on [how to set up your environment](./NBSETUP.md) using Azure Notebook service, your own Jupyter notebook server, or Docker.
|
|
||||||
|
|
||||||
## How to navigate and use the example notebooks?
|
Install the `azureml.core` Python package:
|
||||||
If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, you should always run the [Configuration](./configuration.ipynb) notebook first when setting up a notebook library on a new machine or in a new environment. It configures your notebook library to connect to an Azure Machine Learning workspace, and sets up your workspace and compute to be used by many of the other examples.
|
|
||||||
This [index](./index.md) should assist in navigating the Azure Machine Learning notebook samples and encourage efficient retrieval of topics and content.
|
|
||||||
|
|
||||||
If you want to...
|
|
||||||
|
|
||||||
* ...try out and explore Azure ML, start with image classification tutorials: [Part 1 (Training)](./tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb) and [Part 2 (Deployment)](./tutorials/image-classification-mnist-data/img-classification-part2-deploy.ipynb).
|
|
||||||
* ...learn about experimentation and tracking run history: [track and monitor experiments](./how-to-use-azureml/track-and-monitor-experiments).
|
|
||||||
* ...train deep learning models at scale, first learn about [Machine Learning Compute](./how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb), and then try [distributed hyperparameter tuning](./how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) and [distributed training](./how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb).
|
|
||||||
* ...deploy models as a realtime scoring service, first learn the basics by [deploying to Azure Container Instance](./how-to-use-azureml/deployment/deploy-to-cloud/model-register-and-deploy.ipynb), then learn how to [production deploy models on Azure Kubernetes Cluster](./how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb).
|
|
||||||
* ...deploy models as a batch scoring service: [create Machine Learning Compute for scoring compute](./how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb) and [use Machine Learning Pipelines to deploy your model](https://aka.ms/pl-batch-scoring).
|
|
||||||
* ...monitor your deployed models, learn about using [App Insights](./how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb).
|
|
||||||
|
|
||||||
## Tutorials
|
|
||||||
|
|
||||||
The [Tutorials](./tutorials) folder contains notebooks for the tutorials described in the [Azure Machine Learning documentation](https://aka.ms/aml-docs).
|
|
||||||
|
|
||||||
## How to use Azure ML
|
|
||||||
|
|
||||||
The [How to use Azure ML](./how-to-use-azureml) folder contains specific examples demonstrating the features of the Azure Machine Learning SDK
|
|
||||||
|
|
||||||
- [Training](./how-to-use-azureml/training) - Examples of how to build models using Azure ML's logging and execution capabilities on local and remote compute targets
|
|
||||||
- [Training with ML and DL frameworks](./how-to-use-azureml/ml-frameworks) - Examples demonstrating how to build and train machine learning models at scale on Azure ML and perform hyperparameter tuning.
|
|
||||||
- [Manage Azure ML Service](./how-to-use-azureml/manage-azureml-service) - Examples how to perform tasks, such as authenticate against Azure ML service in different ways.
|
|
||||||
- [Automated Machine Learning](./how-to-use-azureml/automated-machine-learning) - Examples using Automated Machine Learning to automatically generate optimal machine learning pipelines and models
|
|
||||||
- [Machine Learning Pipelines](./how-to-use-azureml/machine-learning-pipelines) - Examples showing how to create and use reusable pipelines for training and batch scoring
|
|
||||||
- [Deployment](./how-to-use-azureml/deployment) - Examples showing how to deploy and manage machine learning models and solutions
|
|
||||||
- [Azure Databricks](./how-to-use-azureml/azure-databricks) - Examples showing how to use Azure ML with Azure Databricks
|
|
||||||
- [Reinforcement Learning](./how-to-use-azureml/reinforcement-learning) - Examples showing how to train reinforcement learning agents
|
|
||||||
|
|
||||||
---
|
|
||||||
## Documentation
|
|
||||||
|
|
||||||
* Quickstarts, end-to-end tutorials, and how-tos on the [official documentation site for Azure Machine Learning service](https://docs.microsoft.com/en-us/azure/machine-learning/service/).
|
|
||||||
* [Python SDK reference](https://docs.microsoft.com/en-us/python/api/overview/azure/ml/intro?view=azure-ml-py)
|
|
||||||
* Azure ML Data Prep SDK [overview](https://aka.ms/data-prep-sdk), [Python SDK reference](https://aka.ms/aml-data-prep-apiref), and [tutorials and how-tos](https://aka.ms/aml-data-prep-notebooks).
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
|
|
||||||
## Community Repository
|
|
||||||
Visit this [community repository](https://github.com/microsoft/MLOps/tree/master/examples) to find useful end-to-end sample notebooks. Also, please follow these [contribution guidelines](https://github.com/microsoft/MLOps/blob/master/contributing.md) when contributing to this repository.
|
|
||||||
|
|
||||||
## Projects using Azure Machine Learning
|
|
||||||
|
|
||||||
Visit following repos to see projects contributed by Azure ML users:
|
|
||||||
- [Learn about Natural Language Processing best practices using Azure Machine Learning service](https://github.com/microsoft/nlp)
|
|
||||||
- [Pre-Train BERT models using Azure Machine Learning service](https://github.com/Microsoft/AzureML-BERT)
|
|
||||||
- [Fashion MNIST with Azure ML SDK](https://github.com/amynic/azureml-sdk-fashion)
|
|
||||||
- [UMass Amherst Student Samples](https://github.com/katiehouse3/microsoft-azure-ml-notebooks) - A number of end-to-end machine learning notebooks, including machine translation, image classification, and customer churn, created by students in the 696DS course at UMass Amherst.
|
|
||||||
|
|
||||||
## Data/Telemetry
|
|
||||||
This repository collects usage data and sends it to Microsoft to help improve our products and services. Read Microsoft's [privacy statement to learn more](https://privacy.microsoft.com/en-US/privacystatement)
|
|
||||||
|
|
||||||
To opt out of tracking, please go to the raw markdown or .ipynb files and remove the following line of code:
|
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
""
|
pip install azureml-core
|
||||||
```
|
```
|
||||||
This URL will be slightly different depending on the file.
|
|
||||||
|
|
||||||

|
Install additional packages as needed:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
pip install azureml-mlflow
|
||||||
|
pip install azureml-dataset-runtime
|
||||||
|
pip install azureml-automl-runtime
|
||||||
|
pip install azureml-pipeline
|
||||||
|
pip install azureml-pipeline-steps
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
We recommend starting with one of the [quickstarts](tutorials/compute-instance-quickstarts).
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
This repository is a push-only mirror. Pull requests are ignored.
|
||||||
|
|
||||||
|
## Code of Conduct
|
||||||
|
|
||||||
|
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). Please see the [code of conduct](CODE_OF_CONDUCT.md) for details.
|
||||||
|
|
||||||
|
## Reference
|
||||||
|
|
||||||
|
- [Documentation](https://docs.microsoft.com/azure/machine-learning)
|
||||||
|
|
||||||
|
|||||||
41
SECURITY.md
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.7 BLOCK -->
|
||||||
|
|
||||||
|
## Security
|
||||||
|
|
||||||
|
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
|
||||||
|
|
||||||
|
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
|
||||||
|
|
||||||
|
## Reporting Security Issues
|
||||||
|
|
||||||
|
**Please do not report security vulnerabilities through public GitHub issues.**
|
||||||
|
|
||||||
|
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
|
||||||
|
|
||||||
|
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
|
||||||
|
|
||||||
|
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
|
||||||
|
|
||||||
|
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
|
||||||
|
|
||||||
|
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
|
||||||
|
* Full paths of source file(s) related to the manifestation of the issue
|
||||||
|
* The location of the affected source code (tag/branch/commit or direct URL)
|
||||||
|
* Any special configuration required to reproduce the issue
|
||||||
|
* Step-by-step instructions to reproduce the issue
|
||||||
|
* Proof-of-concept or exploit code (if possible)
|
||||||
|
* Impact of the issue, including how an attacker might exploit the issue
|
||||||
|
|
||||||
|
This information will help us triage your report more quickly.
|
||||||
|
|
||||||
|
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
|
||||||
|
|
||||||
|
## Preferred Languages
|
||||||
|
|
||||||
|
We prefer all communications to be in English.
|
||||||
|
|
||||||
|
## Policy
|
||||||
|
|
||||||
|
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
|
||||||
|
|
||||||
|
<!-- END MICROSOFT SECURITY.MD BLOCK -->
|
||||||
@@ -103,7 +103,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"import azureml.core\n",
|
"import azureml.core\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
|
"print(\"This notebook was created using version 1.55.0 of the Azure ML SDK\")\n",
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -254,6 +254,8 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"Many of the sample notebooks use Azure ML managed compute (AmlCompute) to train models using a dynamically scalable pool of compute. In this section you will create default compute clusters for use by the other notebooks and any other operations you choose.\n",
|
"Many of the sample notebooks use Azure ML managed compute (AmlCompute) to train models using a dynamically scalable pool of compute. In this section you will create default compute clusters for use by the other notebooks and any other operations you choose.\n",
|
||||||
"\n",
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||||
|
"\n",
|
||||||
"To create a cluster, you need to specify a compute configuration that specifies the type of machine to be used and the scalability behaviors. Then you choose a name for the cluster that is unique within the workspace that can be used to address the cluster later.\n",
|
"To create a cluster, you need to specify a compute configuration that specifies the type of machine to be used and the scalability behaviors. Then you choose a name for the cluster that is unique within the workspace that can be used to address the cluster later.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"The cluster parameters are:\n",
|
"The cluster parameters are:\n",
|
||||||
@@ -327,7 +329,7 @@
|
|||||||
" print(\"Creating new gpu-cluster\")\n",
|
" print(\"Creating new gpu-cluster\")\n",
|
||||||
" \n",
|
" \n",
|
||||||
" # Specify the configuration for the new cluster\n",
|
" # Specify the configuration for the new cluster\n",
|
||||||
" compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_NC6\",\n",
|
" compute_config = AmlCompute.provisioning_configuration(vm_size=\"Standard_NC6s_v3\",\n",
|
||||||
" min_nodes=0,\n",
|
" min_nodes=0,\n",
|
||||||
" max_nodes=4)\n",
|
" max_nodes=4)\n",
|
||||||
" # Create the cluster with the specified name and configuration\n",
|
" # Create the cluster with the specified name and configuration\n",
|
||||||
@@ -365,9 +367,9 @@
|
|||||||
}
|
}
|
||||||
],
|
],
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3.6",
|
"display_name": "Python 3.8 - AzureML",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python36"
|
"name": "python38-azureml"
|
||||||
},
|
},
|
||||||
"language_info": {
|
"language_info": {
|
||||||
"codemirror_mode": {
|
"codemirror_mode": {
|
||||||
|
|||||||
@@ -174,7 +174,7 @@
|
|||||||
"else:\n",
|
"else:\n",
|
||||||
" print(\"creating new cluster\")\n",
|
" print(\"creating new cluster\")\n",
|
||||||
" # vm_size parameter below could be modified to one of the RAPIDS-supported VM types\n",
|
" # vm_size parameter below could be modified to one of the RAPIDS-supported VM types\n",
|
||||||
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"Standard_NC6s_v2\", min_nodes=1, max_nodes = 1)\n",
|
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"Standard_NC6s_v3\", min_nodes=1, max_nodes = 1)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # create the cluster\n",
|
" # create the cluster\n",
|
||||||
" gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, provisioning_config)\n",
|
" gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, provisioning_config)\n",
|
||||||
@@ -188,13 +188,6 @@
|
|||||||
"### Script to process data and train model"
|
"### Script to process data and train model"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"The _process_data.py_ script used in the step below is a slightly modified implementation of [RAPIDS Mortgage E2E example](https://github.com/rapidsai/notebooks-contrib/blob/master/intermediate_notebooks/E2E/mortgage/mortgage_e2e.ipynb)."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
@@ -373,7 +366,7 @@
|
|||||||
"run_config.target = gpu_cluster_name\n",
|
"run_config.target = gpu_cluster_name\n",
|
||||||
"run_config.environment.docker.enabled = True\n",
|
"run_config.environment.docker.enabled = True\n",
|
||||||
"run_config.environment.docker.gpu_support = True\n",
|
"run_config.environment.docker.gpu_support = True\n",
|
||||||
"run_config.environment.docker.base_image = \"mcr.microsoft.com/azureml/base-gpu:intelmpi2018.3-cuda10.0-cudnn7-ubuntu16.04\"\n",
|
"run_config.environment.docker.base_image = \"mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu20.04\"\n",
|
||||||
"run_config.environment.spark.precache_packages = False\n",
|
"run_config.environment.spark.precache_packages = False\n",
|
||||||
"run_config.data_references={'data':data_ref.to_config()}"
|
"run_config.data_references={'data':data_ref.to_config()}"
|
||||||
]
|
]
|
||||||
@@ -405,7 +398,7 @@
|
|||||||
"# run_config.target = gpu_cluster_name\n",
|
"# run_config.target = gpu_cluster_name\n",
|
||||||
"# run_config.environment.docker.enabled = True\n",
|
"# run_config.environment.docker.enabled = True\n",
|
||||||
"# run_config.environment.docker.gpu_support = True\n",
|
"# run_config.environment.docker.gpu_support = True\n",
|
||||||
"# run_config.environment.docker.base_image = \"rapidsai/rapidsai:cuda9.2-runtime-ubuntu18.04\"\n",
|
"# run_config.environment.docker.base_image = \"rapidsai/rapidsai:cuda9.2-runtime-ubuntu20.04\"\n",
|
||||||
"# # run_config.environment.docker.base_image_registry.address = '<registry_url>' # not required if the base_image is in Docker hub\n",
|
"# # run_config.environment.docker.base_image_registry.address = '<registry_url>' # not required if the base_image is in Docker hub\n",
|
||||||
"# # run_config.environment.docker.base_image_registry.username = '<user_name>' # needed only for private images\n",
|
"# # run_config.environment.docker.base_image_registry.username = '<user_name>' # needed only for private images\n",
|
||||||
"# # run_config.environment.docker.base_image_registry.password = '<password>' # needed only for private images\n",
|
"# # run_config.environment.docker.base_image_registry.password = '<password>' # needed only for private images\n",
|
||||||
@@ -532,9 +525,9 @@
|
|||||||
}
|
}
|
||||||
],
|
],
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3.6",
|
"display_name": "Python 3.8 - AzureML",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python36"
|
"name": "python38-azureml"
|
||||||
},
|
},
|
||||||
"language_info": {
|
"language_info": {
|
||||||
"codemirror_mode": {
|
"codemirror_mode": {
|
||||||
|
|||||||
@@ -36,9 +36,9 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"<a id=\"Introduction\"></a>\n",
|
"<a id=\"Introduction\"></a>\n",
|
||||||
"## Introduction\n",
|
"## Introduction\n",
|
||||||
"This notebook shows how to use [Fairlearn (an open source fairness assessment and unfairness mitigation package)](http://fairlearn.github.io) and Azure Machine Learning Studio for a binary classification problem. This example uses the well-known adult census dataset. For the purposes of this notebook, we shall treat this as a loan decision problem. We will pretend that the label indicates whether or not each individual repaid a loan in the past. We will use the data to train a predictor to predict whether previously unseen individuals will repay a loan or not. The assumption is that the model predictions are used to decide whether an individual should be offered a loan. Its purpose is purely illustrative of a workflow including a fairness dashboard - in particular, we do **not** include a full discussion of the detailed issues which arise when considering fairness in machine learning. For such discussions, please [refer to the Fairlearn website](http://fairlearn.github.io/).\n",
|
"This notebook shows how to use [Fairlearn (an open source fairness assessment and unfairness mitigation package)](http://fairlearn.org) and Azure Machine Learning Studio for a binary classification problem. This example uses the well-known adult census dataset. For the purposes of this notebook, we shall treat this as a loan decision problem. We will pretend that the label indicates whether or not each individual repaid a loan in the past. We will use the data to train a predictor to predict whether previously unseen individuals will repay a loan or not. The assumption is that the model predictions are used to decide whether an individual should be offered a loan. Its purpose is purely illustrative of a workflow including a fairness dashboard - in particular, we do **not** include a full discussion of the detailed issues which arise when considering fairness in machine learning. For such discussions, please [refer to the Fairlearn website](http://fairlearn.org/).\n",
|
||||||
"\n",
|
"\n",
|
||||||
"We will apply the [grid search algorithm](https://fairlearn.github.io/master/api_reference/fairlearn.reductions.html#fairlearn.reductions.GridSearch) from the Fairlearn package using a specific notion of fairness called Demographic Parity. This produces a set of models, and we will view these in a dashboard both locally and in the Azure Machine Learning Studio.\n",
|
"We will apply the [grid search algorithm](https://fairlearn.org/v0.4.6/api_reference/fairlearn.reductions.html#fairlearn.reductions.GridSearch) from the Fairlearn package using a specific notion of fairness called Demographic Parity. This produces a set of models, and we will view these in a dashboard both locally and in the Azure Machine Learning Studio.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"### Setup\n",
|
"### Setup\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -46,9 +46,10 @@
|
|||||||
"Please see the [configuration notebook](../../configuration.ipynb) for information about creating one, if required.\n",
|
"Please see the [configuration notebook](../../configuration.ipynb) for information about creating one, if required.\n",
|
||||||
"This notebook also requires the following packages:\n",
|
"This notebook also requires the following packages:\n",
|
||||||
"* `azureml-contrib-fairness`\n",
|
"* `azureml-contrib-fairness`\n",
|
||||||
"* `fairlearn==0.4.6` (v0.5.0 will work with minor modifications)\n",
|
"* `fairlearn>=0.6.2` (pre-v0.5.0 will work with minor modifications)\n",
|
||||||
"* `joblib`\n",
|
"* `joblib`\n",
|
||||||
"* `shap`\n",
|
"* `liac-arff`\n",
|
||||||
|
"* `raiwidgets`\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell:"
|
"Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell:"
|
||||||
]
|
]
|
||||||
@@ -85,10 +86,9 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from fairlearn.reductions import GridSearch, DemographicParity, ErrorRate\n",
|
"from fairlearn.reductions import GridSearch, DemographicParity, ErrorRate\n",
|
||||||
"from fairlearn.widget import FairlearnDashboard\n",
|
"from raiwidgets import FairnessDashboard\n",
|
||||||
"\n",
|
"\n",
|
||||||
"from sklearn.compose import ColumnTransformer\n",
|
"from sklearn.compose import ColumnTransformer\n",
|
||||||
"from sklearn.datasets import fetch_openml\n",
|
|
||||||
"from sklearn.impute import SimpleImputer\n",
|
"from sklearn.impute import SimpleImputer\n",
|
||||||
"from sklearn.linear_model import LogisticRegression\n",
|
"from sklearn.linear_model import LogisticRegression\n",
|
||||||
"from sklearn.model_selection import train_test_split\n",
|
"from sklearn.model_selection import train_test_split\n",
|
||||||
@@ -112,9 +112,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from fairness_nb_utils import fetch_openml_with_retries\n",
|
"from fairness_nb_utils import fetch_census_dataset\n",
|
||||||
"\n",
|
"\n",
|
||||||
"data = fetch_openml_with_retries(data_id=1590)\n",
|
"data = fetch_census_dataset()\n",
|
||||||
" \n",
|
" \n",
|
||||||
"# Extract the items we want\n",
|
"# Extract the items we want\n",
|
||||||
"X_raw = data.data\n",
|
"X_raw = data.data\n",
|
||||||
@@ -137,7 +137,7 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"A = X_raw[['sex','race']]\n",
|
"A = X_raw[['sex','race']]\n",
|
||||||
"X_raw = X_raw.drop(labels=['sex', 'race'],axis = 1)"
|
"X_raw = X_raw.drop(labels=['sex', 'race'], axis = 1)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -257,9 +257,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"FairlearnDashboard(sensitive_features=A_test, sensitive_feature_names=['Sex', 'Race'],\n",
|
"FairnessDashboard(sensitive_features=A_test,\n",
|
||||||
" y_true=y_test,\n",
|
" y_true=y_test,\n",
|
||||||
" y_pred={\"unmitigated\": unmitigated_predictor.predict(X_test)})"
|
" y_pred={\"unmitigated\": unmitigated_predictor.predict(X_test)})"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -312,8 +312,8 @@
|
|||||||
"sweep.fit(X_train, y_train,\n",
|
"sweep.fit(X_train, y_train,\n",
|
||||||
" sensitive_features=A_train.sex)\n",
|
" sensitive_features=A_train.sex)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# For Fairlearn v0.5.0, need sweep.predictors_\n",
|
"# For Fairlearn pre-v0.5.0, need sweep._predictors\n",
|
||||||
"predictors = sweep._predictors"
|
"predictors = sweep.predictors_"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -330,16 +330,14 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"errors, disparities = [], []\n",
|
"errors, disparities = [], []\n",
|
||||||
"for m in predictors:\n",
|
"for predictor in predictors:\n",
|
||||||
" classifier = lambda X: m.predict(X)\n",
|
|
||||||
" \n",
|
|
||||||
" error = ErrorRate()\n",
|
" error = ErrorRate()\n",
|
||||||
" error.load_data(X_train, pd.Series(y_train), sensitive_features=A_train.sex)\n",
|
" error.load_data(X_train, pd.Series(y_train), sensitive_features=A_train.sex)\n",
|
||||||
" disparity = DemographicParity()\n",
|
" disparity = DemographicParity()\n",
|
||||||
" disparity.load_data(X_train, pd.Series(y_train), sensitive_features=A_train.sex)\n",
|
" disparity.load_data(X_train, pd.Series(y_train), sensitive_features=A_train.sex)\n",
|
||||||
" \n",
|
" \n",
|
||||||
" errors.append(error.gamma(classifier)[0])\n",
|
" errors.append(error.gamma(predictor.predict)[0])\n",
|
||||||
" disparities.append(disparity.gamma(classifier).max())\n",
|
" disparities.append(disparity.gamma(predictor.predict).max())\n",
|
||||||
" \n",
|
" \n",
|
||||||
"all_results = pd.DataFrame( {\"predictor\": predictors, \"error\": errors, \"disparity\": disparities})\n",
|
"all_results = pd.DataFrame( {\"predictor\": predictors, \"error\": errors, \"disparity\": disparities})\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -388,10 +386,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"FairlearnDashboard(sensitive_features=A_test, \n",
|
"FairnessDashboard(sensitive_features=A_test, \n",
|
||||||
" sensitive_feature_names=['Sex', 'Race'],\n",
|
" y_true=y_test.tolist(),\n",
|
||||||
" y_true=y_test.tolist(),\n",
|
" y_pred=predictions_dominant)"
|
||||||
" y_pred=predictions_dominant)"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -410,7 +407,7 @@
|
|||||||
"<a id=\"AzureUpload\"></a>\n",
|
"<a id=\"AzureUpload\"></a>\n",
|
||||||
"## Uploading a Fairness Dashboard to Azure\n",
|
"## Uploading a Fairness Dashboard to Azure\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Uploading a fairness dashboard to Azure is a two stage process. The `FairlearnDashboard` invoked in the previous section relies on the underlying Python kernel to compute metrics on demand. This is obviously not available when the fairness dashboard is rendered in AzureML Studio. By default, the dashboard in Azure Machine Learning Studio also requires the models to be registered. The required stages are therefore:\n",
|
"Uploading a fairness dashboard to Azure is a two stage process. The `FairnessDashboard` invoked in the previous section relies on the underlying Python kernel to compute metrics on demand. This is obviously not available when the fairness dashboard is rendered in AzureML Studio. By default, the dashboard in Azure Machine Learning Studio also requires the models to be registered. The required stages are therefore:\n",
|
||||||
"1. Register the dominant models\n",
|
"1. Register the dominant models\n",
|
||||||
"1. Precompute all the required metrics\n",
|
"1. Precompute all the required metrics\n",
|
||||||
"1. Upload to Azure\n",
|
"1. Upload to Azure\n",
|
||||||
@@ -584,7 +581,7 @@
|
|||||||
"<a id=\"Conclusion\"></a>\n",
|
"<a id=\"Conclusion\"></a>\n",
|
||||||
"## Conclusion\n",
|
"## Conclusion\n",
|
||||||
"\n",
|
"\n",
|
||||||
"In this notebook we have demonstrated how to use the `GridSearch` algorithm from Fairlearn to generate a collection of models, and then present them in the fairness dashboard in Azure Machine Learning Studio. Please remember that this notebook has not attempted to discuss the many considerations which should be part of any approach to unfairness mitigation. The [Fairlearn website](http://fairlearn.github.io/) provides that discussion"
|
"In this notebook we have demonstrated how to use the `GridSearch` algorithm from Fairlearn to generate a collection of models, and then present them in the fairness dashboard in Azure Machine Learning Studio. Please remember that this notebook has not attempted to discuss the many considerations which should be part of any approach to unfairness mitigation. The [Fairlearn website](http://fairlearn.org/) provides that discussion"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -602,9 +599,9 @@
|
|||||||
}
|
}
|
||||||
],
|
],
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3.6",
|
"display_name": "Python 3.8 - AzureML",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python36"
|
"name": "python38-azureml"
|
||||||
},
|
},
|
||||||
"language_info": {
|
"language_info": {
|
||||||
"codemirror_mode": {
|
"codemirror_mode": {
|
||||||
|
|||||||
@@ -3,5 +3,11 @@ dependencies:
|
|||||||
- pip:
|
- pip:
|
||||||
- azureml-sdk
|
- azureml-sdk
|
||||||
- azureml-contrib-fairness
|
- azureml-contrib-fairness
|
||||||
- fairlearn==0.4.6
|
- fairlearn>=0.6.2,<=0.7.0
|
||||||
- joblib
|
- joblib
|
||||||
|
- liac-arff
|
||||||
|
- raiwidgets~=0.33.0
|
||||||
|
- itsdangerous==2.0.1
|
||||||
|
- markupsafe<2.1.0
|
||||||
|
- protobuf==3.20.0
|
||||||
|
- numpy<1.24.0
|
||||||
|
|||||||
@@ -4,7 +4,13 @@
|
|||||||
|
|
||||||
"""Utilities for azureml-contrib-fairness notebooks."""
|
"""Utilities for azureml-contrib-fairness notebooks."""
|
||||||
|
|
||||||
|
import arff
|
||||||
|
from collections import OrderedDict
|
||||||
|
from contextlib import closing
|
||||||
|
import gzip
|
||||||
|
import pandas as pd
|
||||||
from sklearn.datasets import fetch_openml
|
from sklearn.datasets import fetch_openml
|
||||||
|
from sklearn.utils import Bunch
|
||||||
import time
|
import time
|
||||||
|
|
||||||
|
|
||||||
@@ -15,7 +21,7 @@ def fetch_openml_with_retries(data_id, max_retries=4, retry_delay=60):
|
|||||||
print("Download attempt {0} of {1}".format(i + 1, max_retries))
|
print("Download attempt {0} of {1}".format(i + 1, max_retries))
|
||||||
data = fetch_openml(data_id=data_id, as_frame=True)
|
data = fetch_openml(data_id=data_id, as_frame=True)
|
||||||
break
|
break
|
||||||
except Exception as e:
|
except Exception as e: # noqa: B902
|
||||||
print("Download attempt failed with exception:")
|
print("Download attempt failed with exception:")
|
||||||
print(e)
|
print(e)
|
||||||
if i + 1 != max_retries:
|
if i + 1 != max_retries:
|
||||||
@@ -26,3 +32,80 @@ def fetch_openml_with_retries(data_id, max_retries=4, retry_delay=60):
|
|||||||
raise RuntimeError("Unable to download dataset from OpenML")
|
raise RuntimeError("Unable to download dataset from OpenML")
|
||||||
|
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
_categorical_columns = [
|
||||||
|
'workclass',
|
||||||
|
'education',
|
||||||
|
'marital-status',
|
||||||
|
'occupation',
|
||||||
|
'relationship',
|
||||||
|
'race',
|
||||||
|
'sex',
|
||||||
|
'native-country'
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def fetch_census_dataset():
|
||||||
|
"""Fetch the Adult Census Dataset.
|
||||||
|
|
||||||
|
This uses a particular URL for the Adult Census dataset. The code
|
||||||
|
is a simplified version of fetch_openml() in sklearn.
|
||||||
|
|
||||||
|
The data are copied from:
|
||||||
|
https://openml.org/data/v1/download/1595261.gz
|
||||||
|
(as of 2021-03-31)
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
from urllib import urlretrieve
|
||||||
|
except ImportError:
|
||||||
|
from urllib.request import urlretrieve
|
||||||
|
|
||||||
|
filename = "1595261.gz"
|
||||||
|
data_url = "https://rainotebookscdn.blob.core.windows.net/datasets/"
|
||||||
|
|
||||||
|
remaining_attempts = 5
|
||||||
|
sleep_duration = 10
|
||||||
|
while remaining_attempts > 0:
|
||||||
|
try:
|
||||||
|
urlretrieve(data_url + filename, filename)
|
||||||
|
|
||||||
|
http_stream = gzip.GzipFile(filename=filename, mode='rb')
|
||||||
|
|
||||||
|
with closing(http_stream):
|
||||||
|
def _stream_generator(response):
|
||||||
|
for line in response:
|
||||||
|
yield line.decode('utf-8')
|
||||||
|
|
||||||
|
stream = _stream_generator(http_stream)
|
||||||
|
data = arff.load(stream)
|
||||||
|
except Exception as exc: # noqa: B902
|
||||||
|
remaining_attempts -= 1
|
||||||
|
print("Error downloading dataset from {} ({} attempt(s) remaining)"
|
||||||
|
.format(data_url, remaining_attempts))
|
||||||
|
print(exc)
|
||||||
|
time.sleep(sleep_duration)
|
||||||
|
sleep_duration *= 2
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
# dataset successfully downloaded
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
raise Exception("Could not retrieve dataset from {}.".format(data_url))
|
||||||
|
|
||||||
|
attributes = OrderedDict(data['attributes'])
|
||||||
|
arff_columns = list(attributes)
|
||||||
|
|
||||||
|
raw_df = pd.DataFrame(data=data['data'], columns=arff_columns)
|
||||||
|
|
||||||
|
target_column_name = 'class'
|
||||||
|
target = raw_df.pop(target_column_name)
|
||||||
|
for col_name in _categorical_columns:
|
||||||
|
dtype = pd.api.types.CategoricalDtype(attributes[col_name])
|
||||||
|
raw_df[col_name] = raw_df[col_name].astype(dtype, copy=False)
|
||||||
|
|
||||||
|
result = Bunch()
|
||||||
|
result.data = raw_df
|
||||||
|
result.target = target
|
||||||
|
|
||||||
|
return result
|
||||||
|
|||||||
@@ -30,7 +30,7 @@
|
|||||||
"1. [Training Models](#TrainingModels)\n",
|
"1. [Training Models](#TrainingModels)\n",
|
||||||
"1. [Logging in to AzureML](#LoginAzureML)\n",
|
"1. [Logging in to AzureML](#LoginAzureML)\n",
|
||||||
"1. [Registering the Models](#RegisterModels)\n",
|
"1. [Registering the Models](#RegisterModels)\n",
|
||||||
"1. [Using the Fairlearn Dashboard](#LocalDashboard)\n",
|
"1. [Using the Fairness Dashboard](#LocalDashboard)\n",
|
||||||
"1. [Uploading a Fairness Dashboard to Azure](#AzureUpload)\n",
|
"1. [Uploading a Fairness Dashboard to Azure](#AzureUpload)\n",
|
||||||
" 1. Computing Fairness Metrics\n",
|
" 1. Computing Fairness Metrics\n",
|
||||||
" 1. Uploading to Azure\n",
|
" 1. Uploading to Azure\n",
|
||||||
@@ -48,9 +48,10 @@
|
|||||||
"Please see the [configuration notebook](../../configuration.ipynb) for information about creating one, if required.\n",
|
"Please see the [configuration notebook](../../configuration.ipynb) for information about creating one, if required.\n",
|
||||||
"This notebook also requires the following packages:\n",
|
"This notebook also requires the following packages:\n",
|
||||||
"* `azureml-contrib-fairness`\n",
|
"* `azureml-contrib-fairness`\n",
|
||||||
"* `fairlearn==0.4.6` (should also work with v0.5.0)\n",
|
"* `fairlearn>=0.6.2` (also works for pre-v0.5.0 with slight modifications)\n",
|
||||||
"* `joblib`\n",
|
"* `joblib`\n",
|
||||||
"* `shap`\n",
|
"* `liac-arff`\n",
|
||||||
|
"* `raiwidgets`\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell:"
|
"Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell:"
|
||||||
]
|
]
|
||||||
@@ -88,7 +89,6 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"from sklearn import svm\n",
|
"from sklearn import svm\n",
|
||||||
"from sklearn.compose import ColumnTransformer\n",
|
"from sklearn.compose import ColumnTransformer\n",
|
||||||
"from sklearn.datasets import fetch_openml\n",
|
|
||||||
"from sklearn.impute import SimpleImputer\n",
|
"from sklearn.impute import SimpleImputer\n",
|
||||||
"from sklearn.linear_model import LogisticRegression\n",
|
"from sklearn.linear_model import LogisticRegression\n",
|
||||||
"from sklearn.model_selection import train_test_split\n",
|
"from sklearn.model_selection import train_test_split\n",
|
||||||
@@ -110,9 +110,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from fairness_nb_utils import fetch_openml_with_retries\n",
|
"from fairness_nb_utils import fetch_census_dataset\n",
|
||||||
"\n",
|
"\n",
|
||||||
"data = fetch_openml_with_retries(data_id=1590)\n",
|
"data = fetch_census_dataset()\n",
|
||||||
" \n",
|
" \n",
|
||||||
"# Extract the items we want\n",
|
"# Extract the items we want\n",
|
||||||
"X_raw = data.data\n",
|
"X_raw = data.data\n",
|
||||||
@@ -389,12 +389,11 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from fairlearn.widget import FairlearnDashboard\n",
|
"from raiwidgets import FairnessDashboard\n",
|
||||||
"\n",
|
"\n",
|
||||||
"FairlearnDashboard(sensitive_features=A_test, \n",
|
"FairnessDashboard(sensitive_features=A_test, \n",
|
||||||
" sensitive_feature_names=['Sex', 'Race'],\n",
|
" y_true=y_test.tolist(),\n",
|
||||||
" y_true=y_test.tolist(),\n",
|
" y_pred=ys_pred)"
|
||||||
" y_pred=ys_pred)"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -404,7 +403,7 @@
|
|||||||
"<a id=\"AzureUpload\"></a>\n",
|
"<a id=\"AzureUpload\"></a>\n",
|
||||||
"## Uploading a Fairness Dashboard to Azure\n",
|
"## Uploading a Fairness Dashboard to Azure\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Uploading a fairness dashboard to Azure is a two stage process. The `FairlearnDashboard` invoked in the previous section relies on the underlying Python kernel to compute metrics on demand. This is obviously not available when the fairness dashboard is rendered in AzureML Studio. The required stages are therefore:\n",
|
"Uploading a fairness dashboard to Azure is a two stage process. The `FairnessDashboard` invoked in the previous section relies on the underlying Python kernel to compute metrics on demand. This is obviously not available when the fairness dashboard is rendered in AzureML Studio. The required stages are therefore:\n",
|
||||||
"1. Precompute all the required metrics\n",
|
"1. Precompute all the required metrics\n",
|
||||||
"1. Upload to Azure\n",
|
"1. Upload to Azure\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -524,9 +523,9 @@
|
|||||||
}
|
}
|
||||||
],
|
],
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3.6",
|
"display_name": "Python 3.8 - AzureML",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python36"
|
"name": "python38-azureml"
|
||||||
},
|
},
|
||||||
"language_info": {
|
"language_info": {
|
||||||
"codemirror_mode": {
|
"codemirror_mode": {
|
||||||
|
|||||||
@@ -3,5 +3,11 @@ dependencies:
|
|||||||
- pip:
|
- pip:
|
||||||
- azureml-sdk
|
- azureml-sdk
|
||||||
- azureml-contrib-fairness
|
- azureml-contrib-fairness
|
||||||
- fairlearn==0.4.6
|
- fairlearn>=0.6.2,<=0.7.0
|
||||||
- joblib
|
- joblib
|
||||||
|
- liac-arff
|
||||||
|
- raiwidgets~=0.33.0
|
||||||
|
- itsdangerous==2.0.1
|
||||||
|
- markupsafe<2.1.0
|
||||||
|
- protobuf==3.20.0
|
||||||
|
- numpy<1.24.0
|
||||||
|
|||||||
@@ -1,29 +1,25 @@
|
|||||||
name: azure_automl
|
name: azure_automl
|
||||||
|
channels:
|
||||||
|
- conda-forge
|
||||||
|
- pytorch
|
||||||
|
- main
|
||||||
dependencies:
|
dependencies:
|
||||||
# The python interpreter version.
|
# The python interpreter version.
|
||||||
# Currently Azure ML only supports 3.5.2 and later.
|
# Azure ML only supports 3.8 and later.
|
||||||
- pip==20.2.4
|
- pip==22.3.1
|
||||||
- python>=3.5.2,<3.8
|
- python>=3.9,<3.10
|
||||||
- nb_conda
|
- holidays==0.29
|
||||||
- boto3==1.15.18
|
- scipy==1.10.1
|
||||||
- matplotlib==2.1.0
|
- tqdm==4.66.1
|
||||||
- numpy==1.18.5
|
|
||||||
- cython
|
|
||||||
- urllib3<1.24
|
|
||||||
- scipy>=1.4.1,<=1.5.2
|
|
||||||
- scikit-learn==0.22.1
|
|
||||||
- pandas==0.25.1
|
|
||||||
- py-xgboost<=0.90
|
|
||||||
- conda-forge::fbprophet==0.5
|
|
||||||
- holidays==0.9.11
|
|
||||||
- pytorch::pytorch=1.4.0
|
|
||||||
- cudatoolkit=10.1.243
|
|
||||||
|
|
||||||
- pip:
|
- pip:
|
||||||
# Required packages for AzureML execution, history, and data preparation.
|
# Required packages for AzureML execution, history, and data preparation.
|
||||||
- azureml-widgets~=1.23.0
|
- azureml-widgets~=1.55.0
|
||||||
- pytorch-transformers==1.0.0
|
- azureml-defaults~=1.55.0
|
||||||
- spacy==2.1.8
|
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.55.0/validated_win32_requirements.txt [--no-deps]
|
||||||
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
|
- matplotlib==3.7.1
|
||||||
- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_win32_requirements.txt [--no-deps]
|
- xgboost==1.3.3
|
||||||
- PyJWT < 2.0.0
|
- prophet==1.1.4
|
||||||
|
- pandas==1.3.5
|
||||||
|
- cmdstanpy==1.1.0
|
||||||
|
- setuptools-git==1.2
|
||||||
|
|||||||
@@ -1,30 +1,30 @@
|
|||||||
name: azure_automl
|
name: azure_automl
|
||||||
|
channels:
|
||||||
|
- conda-forge
|
||||||
|
- pytorch
|
||||||
|
- main
|
||||||
dependencies:
|
dependencies:
|
||||||
# The python interpreter version.
|
# The python interpreter version.
|
||||||
# Currently Azure ML only supports 3.5.2 and later.
|
# Azure ML only supports 3.7 and later.
|
||||||
- pip==20.2.4
|
- pip==22.3.1
|
||||||
- python>=3.5.2,<3.8
|
- python>=3.9,<3.10
|
||||||
- nb_conda
|
- matplotlib==3.7.1
|
||||||
- boto3==1.15.18
|
- numpy>=1.21.6,<=1.23.5
|
||||||
- matplotlib==2.1.0
|
- urllib3==1.26.7
|
||||||
- numpy==1.18.5
|
- scipy==1.10.1
|
||||||
- cython
|
- scikit-learn=1.1.3
|
||||||
- urllib3<1.24
|
- py-xgboost<=1.3.3
|
||||||
- scipy>=1.4.1,<=1.5.2
|
- holidays==0.29
|
||||||
- scikit-learn==0.22.1
|
- pytorch::pytorch=1.11.0
|
||||||
- pandas==0.25.1
|
|
||||||
- py-xgboost<=0.90
|
|
||||||
- conda-forge::fbprophet==0.5
|
|
||||||
- holidays==0.9.11
|
|
||||||
- pytorch::pytorch=1.4.0
|
|
||||||
- cudatoolkit=10.1.243
|
- cudatoolkit=10.1.243
|
||||||
|
- notebook
|
||||||
|
|
||||||
- pip:
|
- pip:
|
||||||
# Required packages for AzureML execution, history, and data preparation.
|
# Required packages for AzureML execution, history, and data preparation.
|
||||||
- azureml-widgets~=1.23.0
|
- azureml-widgets~=1.55.0
|
||||||
|
- azureml-defaults~=1.55.0
|
||||||
- pytorch-transformers==1.0.0
|
- pytorch-transformers==1.0.0
|
||||||
- spacy==2.1.8
|
- spacy==2.3.9
|
||||||
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
|
- prophet==1.1.4
|
||||||
- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_linux_requirements.txt [--no-deps]
|
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.3.1.tar.gz
|
||||||
- PyJWT < 2.0.0
|
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.55.0/validated_linux_requirements.txt [--no-deps]
|
||||||
|
|
||||||
|
|||||||
@@ -1,30 +1,26 @@
|
|||||||
name: azure_automl
|
name: azure_automl
|
||||||
|
channels:
|
||||||
|
- conda-forge
|
||||||
|
- pytorch
|
||||||
|
- main
|
||||||
dependencies:
|
dependencies:
|
||||||
# The python interpreter version.
|
# The python interpreter version.
|
||||||
# Currently Azure ML only supports 3.5.2 and later.
|
# Currently Azure ML only supports 3.7 and later.
|
||||||
- pip==20.2.4
|
- pip==22.3.1
|
||||||
- nomkl
|
- python>=3.9,<3.10
|
||||||
- python>=3.5.2,<3.8
|
- numpy>=1.21.6,<=1.23.5
|
||||||
- nb_conda
|
- scipy==1.10.1
|
||||||
- boto3==1.15.18
|
- scikit-learn==1.1.3
|
||||||
- matplotlib==2.1.0
|
- holidays==0.29
|
||||||
- numpy==1.18.5
|
|
||||||
- cython
|
|
||||||
- urllib3<1.24
|
|
||||||
- scipy>=1.4.1,<=1.5.2
|
|
||||||
- scikit-learn==0.22.1
|
|
||||||
- pandas==0.25.1
|
|
||||||
- py-xgboost<=0.90
|
|
||||||
- conda-forge::fbprophet==0.5
|
|
||||||
- holidays==0.9.11
|
|
||||||
- pytorch::pytorch=1.4.0
|
|
||||||
- cudatoolkit=9.0
|
|
||||||
|
|
||||||
- pip:
|
- pip:
|
||||||
# Required packages for AzureML execution, history, and data preparation.
|
# Required packages for AzureML execution, history, and data preparation.
|
||||||
- azureml-widgets~=1.23.0
|
- azureml-widgets~=1.55.0
|
||||||
|
- azureml-defaults~=1.55.0
|
||||||
- pytorch-transformers==1.0.0
|
- pytorch-transformers==1.0.0
|
||||||
- spacy==2.1.8
|
- prophet==1.1.4
|
||||||
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
|
- xgboost==1.3.3
|
||||||
- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_darwin_requirements.txt [--no-deps]
|
- spacy==2.3.9
|
||||||
- PyJWT < 2.0.0
|
- matplotlib==3.7.1
|
||||||
|
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.3.1.tar.gz
|
||||||
|
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.55.0/validated_darwin_requirements.txt [--no-deps]
|
||||||
|
|||||||
@@ -33,6 +33,8 @@ if not errorlevel 1 (
|
|||||||
call conda env create -f %automl_env_file% -n %conda_env_name%
|
call conda env create -f %automl_env_file% -n %conda_env_name%
|
||||||
)
|
)
|
||||||
|
|
||||||
|
python "%conda_prefix%\scripts\pywin32_postinstall.py" -install
|
||||||
|
|
||||||
call conda activate %conda_env_name% 2>nul:
|
call conda activate %conda_env_name% 2>nul:
|
||||||
if errorlevel 1 goto ErrorExit
|
if errorlevel 1 goto ErrorExit
|
||||||
|
|
||||||
|
|||||||
@@ -32,6 +32,7 @@ if [ $? -ne 0 ]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
sed -i '' 's/AZUREML-SDK-VERSION/latest/' $AUTOML_ENV_FILE
|
sed -i '' 's/AZUREML-SDK-VERSION/latest/' $AUTOML_ENV_FILE
|
||||||
|
brew install libomp
|
||||||
|
|
||||||
if source activate $CONDA_ENV_NAME 2> /dev/null
|
if source activate $CONDA_ENV_NAME 2> /dev/null
|
||||||
then
|
then
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
from distutils.version import LooseVersion
|
from setuptools._vendor.packaging import version
|
||||||
import platform
|
import platform
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import conda
|
import conda
|
||||||
except:
|
except Exception:
|
||||||
print('Failed to import conda.')
|
print('Failed to import conda.')
|
||||||
print('This setup is usually run from the base conda environment.')
|
print('This setup is usually run from the base conda environment.')
|
||||||
print('You can activate the base environment using the command "conda activate base"')
|
print('You can activate the base environment using the command "conda activate base"')
|
||||||
@@ -17,7 +17,7 @@ if architecture != "64bit":
|
|||||||
|
|
||||||
minimumVersion = "4.7.8"
|
minimumVersion = "4.7.8"
|
||||||
|
|
||||||
versionInvalid = (LooseVersion(conda.__version__) < LooseVersion(minimumVersion))
|
versionInvalid = (version.parse(conda.__version__) < version.parse(minimumVersion))
|
||||||
|
|
||||||
if versionInvalid:
|
if versionInvalid:
|
||||||
print('Setup requires conda version ' + minimumVersion + ' or higher.')
|
print('Setup requires conda version ' + minimumVersion + ' or higher.')
|
||||||
|
|||||||
@@ -13,7 +13,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
""
|
""
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -30,6 +30,7 @@
|
|||||||
"1. [Results](#Results)\n",
|
"1. [Results](#Results)\n",
|
||||||
"1. [Deploy](#Deploy)\n",
|
"1. [Deploy](#Deploy)\n",
|
||||||
"1. [Test](#Test)\n",
|
"1. [Test](#Test)\n",
|
||||||
|
"1. [Use auto-generated code for retraining](#Using-the-auto-generated-model-training-code-for-retraining-on-new-data)\n",
|
||||||
"1. [Acknowledgements](#Acknowledgements)"
|
"1. [Acknowledgements](#Acknowledgements)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -55,6 +56,7 @@
|
|||||||
"7. Create a container image.\n",
|
"7. Create a container image.\n",
|
||||||
"8. Create an Azure Container Instance (ACI) service.\n",
|
"8. Create an Azure Container Instance (ACI) service.\n",
|
||||||
"9. Test the ACI service.\n",
|
"9. Test the ACI service.\n",
|
||||||
|
"10. Leverage the auto generated training code and use it for retraining on an updated dataset\n",
|
||||||
"\n",
|
"\n",
|
||||||
"In addition this notebook showcases the following features\n",
|
"In addition this notebook showcases the following features\n",
|
||||||
"- **Blocking** certain pipelines\n",
|
"- **Blocking** certain pipelines\n",
|
||||||
@@ -74,9 +76,12 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"name": "automl-import"
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
|
"import json\n",
|
||||||
"import logging\n",
|
"import logging\n",
|
||||||
"\n",
|
"\n",
|
||||||
"from matplotlib import pyplot as plt\n",
|
"from matplotlib import pyplot as plt\n",
|
||||||
@@ -86,7 +91,6 @@
|
|||||||
"import azureml.core\n",
|
"import azureml.core\n",
|
||||||
"from azureml.core.experiment import Experiment\n",
|
"from azureml.core.experiment import Experiment\n",
|
||||||
"from azureml.core.workspace import Workspace\n",
|
"from azureml.core.workspace import Workspace\n",
|
||||||
"from azureml.automl.core.featurization import FeaturizationConfig\n",
|
|
||||||
"from azureml.core.dataset import Dataset\n",
|
"from azureml.core.dataset import Dataset\n",
|
||||||
"from azureml.train.automl import AutoMLConfig\n",
|
"from azureml.train.automl import AutoMLConfig\n",
|
||||||
"from azureml.interpret import ExplanationClient"
|
"from azureml.interpret import ExplanationClient"
|
||||||
@@ -99,16 +103,6 @@
|
|||||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
|
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -138,24 +132,27 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"name": "ws-setup"
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"ws = Workspace.from_config()\n",
|
"ws = Workspace.from_config()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# choose a name for experiment\n",
|
"# choose a name for experiment\n",
|
||||||
"experiment_name = 'automl-classification-bmarketing-all'\n",
|
"experiment_name = \"automl-classification-bmarketing-all\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"experiment=Experiment(ws, experiment_name)\n",
|
"experiment = Experiment(ws, experiment_name)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"output = {}\n",
|
"output = {}\n",
|
||||||
"output['Subscription ID'] = ws.subscription_id\n",
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
"output['Workspace'] = ws.name\n",
|
"output[\"Workspace\"] = ws.name\n",
|
||||||
"output['Resource Group'] = ws.resource_group\n",
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
"output['Location'] = ws.location\n",
|
"output[\"Location\"] = ws.location\n",
|
||||||
"output['Experiment Name'] = experiment.name\n",
|
"output[\"Experiment Name\"] = experiment.name\n",
|
||||||
"pd.set_option('display.max_colwidth', -1)\n",
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
"outputDf.T"
|
"outputDf.T"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -165,6 +162,9 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"## Create or Attach existing AmlCompute\n",
|
"## Create or Attach existing AmlCompute\n",
|
||||||
"You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
"You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||||
|
"\n",
|
||||||
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||||
@@ -173,7 +173,9 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
@@ -185,12 +187,12 @@
|
|||||||
"# Verify that cluster does not exist already\n",
|
"# Verify that cluster does not exist already\n",
|
||||||
"try:\n",
|
"try:\n",
|
||||||
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||||
" print('Found existing cluster, use it.')\n",
|
" print(\"Found existing cluster, use it.\")\n",
|
||||||
"except ComputeTargetException:\n",
|
"except ComputeTargetException:\n",
|
||||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
|
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||||
" max_nodes=6)\n",
|
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
|
||||||
|
" )\n",
|
||||||
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||||
"\n",
|
|
||||||
"compute_target.wait_for_completion(show_output=True)"
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -223,7 +225,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"data = pd.read_csv(\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\")\n",
|
"data = pd.read_csv(\n",
|
||||||
|
" \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\"\n",
|
||||||
|
")\n",
|
||||||
"data.head()"
|
"data.head()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -238,7 +242,12 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"missing_rate = 0.75\n",
|
"missing_rate = 0.75\n",
|
||||||
"n_missing_samples = int(np.floor(data.shape[0] * missing_rate))\n",
|
"n_missing_samples = int(np.floor(data.shape[0] * missing_rate))\n",
|
||||||
"missing_samples = np.hstack((np.zeros(data.shape[0] - n_missing_samples, dtype=np.bool), np.ones(n_missing_samples, dtype=np.bool)))\n",
|
"missing_samples = np.hstack(\n",
|
||||||
|
" (\n",
|
||||||
|
" np.zeros(data.shape[0] - n_missing_samples, dtype=bool),\n",
|
||||||
|
" np.ones(n_missing_samples, dtype=bool),\n",
|
||||||
|
" )\n",
|
||||||
|
")\n",
|
||||||
"rng = np.random.RandomState(0)\n",
|
"rng = np.random.RandomState(0)\n",
|
||||||
"rng.shuffle(missing_samples)\n",
|
"rng.shuffle(missing_samples)\n",
|
||||||
"missing_features = rng.randint(0, data.shape[1], n_missing_samples)\n",
|
"missing_features = rng.randint(0, data.shape[1], n_missing_samples)\n",
|
||||||
@@ -251,19 +260,21 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"if not os.path.isdir('data'):\n",
|
"if not os.path.isdir(\"data\"):\n",
|
||||||
" os.mkdir('data')\n",
|
" os.mkdir(\"data\")\n",
|
||||||
" \n",
|
|
||||||
"# Save the train data to a csv to be uploaded to the datastore\n",
|
"# Save the train data to a csv to be uploaded to the datastore\n",
|
||||||
"pd.DataFrame(data).to_csv(\"data/train_data.csv\", index=False)\n",
|
"pd.DataFrame(data).to_csv(\"data/train_data.csv\", index=False)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"ds = ws.get_default_datastore()\n",
|
"ds = ws.get_default_datastore()\n",
|
||||||
"ds.upload(src_dir='./data', target_path='bankmarketing', overwrite=True, show_progress=True)\n",
|
"ds.upload(\n",
|
||||||
|
" src_dir=\"./data\", target_path=\"bankmarketing\", overwrite=True, show_progress=True\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
" \n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"# Upload the training data as a tabular dataset for access during training on remote compute\n",
|
"# Upload the training data as a tabular dataset for access during training on remote compute\n",
|
||||||
"train_data = Dataset.Tabular.from_delimited_files(path=ds.path('bankmarketing/train_data.csv'))\n",
|
"train_data = Dataset.Tabular.from_delimited_files(\n",
|
||||||
|
" path=ds.path(\"bankmarketing/train_data.csv\")\n",
|
||||||
|
")\n",
|
||||||
"label = \"y\""
|
"label = \"y\""
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -323,6 +334,7 @@
|
|||||||
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
||||||
"|**training_data**|Input dataset, containing both features and label column.|\n",
|
"|**training_data**|Input dataset, containing both features and label column.|\n",
|
||||||
"|**label_column_name**|The name of the label column.|\n",
|
"|**label_column_name**|The name of the label column.|\n",
|
||||||
|
"|**enable_code_generation**|Flag to enable generation of training code for each of the models that AutoML is creating.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
|
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
|
||||||
]
|
]
|
||||||
@@ -334,33 +346,37 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"automl_settings = {\n",
|
"automl_settings = {\n",
|
||||||
" \"experiment_timeout_hours\" : 0.3,\n",
|
" \"experiment_timeout_hours\": 0.3,\n",
|
||||||
" \"enable_early_stopping\" : True,\n",
|
" \"enable_early_stopping\": True,\n",
|
||||||
" \"iteration_timeout_minutes\": 5,\n",
|
" \"iteration_timeout_minutes\": 5,\n",
|
||||||
" \"max_concurrent_iterations\": 4,\n",
|
" \"max_concurrent_iterations\": 4,\n",
|
||||||
" \"max_cores_per_iteration\": -1,\n",
|
" \"max_cores_per_iteration\": -1,\n",
|
||||||
" #\"n_cross_validations\": 2,\n",
|
" # \"n_cross_validations\": 2,\n",
|
||||||
" \"primary_metric\": 'AUC_weighted',\n",
|
" \"primary_metric\": \"AUC_weighted\",\n",
|
||||||
" \"featurization\": 'auto',\n",
|
" \"featurization\": \"auto\",\n",
|
||||||
" \"verbosity\": logging.INFO,\n",
|
" \"verbosity\": logging.INFO,\n",
|
||||||
|
" \"enable_code_generation\": True,\n",
|
||||||
"}\n",
|
"}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"automl_config = AutoMLConfig(task = 'classification',\n",
|
"automl_config = AutoMLConfig(\n",
|
||||||
" debug_log = 'automl_errors.log',\n",
|
" task=\"classification\",\n",
|
||||||
" compute_target=compute_target,\n",
|
" debug_log=\"automl_errors.log\",\n",
|
||||||
" experiment_exit_score = 0.9984,\n",
|
" compute_target=compute_target,\n",
|
||||||
" blocked_models = ['KNN','LinearSVM'],\n",
|
" experiment_exit_score=0.9984,\n",
|
||||||
" enable_onnx_compatible_models=True,\n",
|
" blocked_models=[\"KNN\", \"LinearSVM\"],\n",
|
||||||
" training_data = train_data,\n",
|
" enable_onnx_compatible_models=True,\n",
|
||||||
" label_column_name = label,\n",
|
" training_data=train_data,\n",
|
||||||
" validation_data = validation_dataset,\n",
|
" label_column_name=label,\n",
|
||||||
" **automl_settings\n",
|
" validation_data=validation_dataset,\n",
|
||||||
" )"
|
" **automl_settings,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
|
"Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
|
||||||
]
|
]
|
||||||
@@ -368,24 +384,19 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"name": "experiment-submit"
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"remote_run = experiment.submit(automl_config, show_output = False)"
|
"remote_run = experiment.submit(automl_config, show_output=False)"
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"remote_run"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"Run the following cell to access previous runs. Uncomment the cell below and update the run_id."
|
"Run the following cell to access previous runs. Uncomment the cell below and update the run_id."
|
||||||
]
|
]
|
||||||
@@ -396,9 +407,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"#from azureml.train.automl.run import AutoMLRun\n",
|
"# from azureml.train.automl.run import AutoMLRun\n",
|
||||||
"#remote_run = AutoMLRun(experiment=experiment, run_id='<run_ID_goes_here')\n",
|
"# remote_run = AutoMLRun(experiment=experiment, run_id='<run_ID_goes_here')\n",
|
||||||
"#remote_run"
|
"# remote_run"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -417,7 +428,8 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"best_run_customized, fitted_model_customized = remote_run.get_output()"
|
"# Retrieve the best Run object\n",
|
||||||
|
"best_run = remote_run.get_best_child()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -426,7 +438,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"## Transparency\n",
|
"## Transparency\n",
|
||||||
"\n",
|
"\n",
|
||||||
"View updated featurization summary"
|
"View featurization summary for the best model - to study how different features were transformed. This is stored as a JSON file in the outputs directory for the run."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -435,36 +447,16 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"custom_featurizer = fitted_model_customized.named_steps['datatransformer']\n",
|
"# Download the featurization summary JSON file locally\n",
|
||||||
"df = custom_featurizer.get_featurization_summary()\n",
|
"best_run.download_file(\n",
|
||||||
"pd.DataFrame(data=df)"
|
" \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
|
||||||
]
|
")\n",
|
||||||
},
|
"\n",
|
||||||
{
|
"# Render the JSON as a pandas DataFrame\n",
|
||||||
"cell_type": "markdown",
|
"with open(\"featurization_summary.json\", \"r\") as f:\n",
|
||||||
"metadata": {},
|
" records = json.load(f)\n",
|
||||||
"source": [
|
"\n",
|
||||||
"Set `is_user_friendly=False` to get a more detailed summary for the transforms being applied."
|
"pd.DataFrame.from_records(records)"
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"df = custom_featurizer.get_featurization_summary(is_user_friendly=False)\n",
|
|
||||||
"pd.DataFrame(data=df)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"df = custom_featurizer.get_stats_feature_type_summary()\n",
|
|
||||||
"pd.DataFrame(data=df)"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -477,11 +469,14 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"name": "run-details"
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.widgets import RunDetails\n",
|
"from azureml.widgets import RunDetails\n",
|
||||||
"RunDetails(remote_run).show() "
|
"\n",
|
||||||
|
"RunDetails(remote_run).show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -500,13 +495,16 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"# Wait for the best model explanation run to complete\n",
|
"# Wait for the best model explanation run to complete\n",
|
||||||
"from azureml.core.run import Run\n",
|
"from azureml.core.run import Run\n",
|
||||||
|
"\n",
|
||||||
"model_explainability_run_id = remote_run.id + \"_\" + \"ModelExplain\"\n",
|
"model_explainability_run_id = remote_run.id + \"_\" + \"ModelExplain\"\n",
|
||||||
"print(model_explainability_run_id)\n",
|
"print(model_explainability_run_id)\n",
|
||||||
"model_explainability_run = Run(experiment=experiment, run_id=model_explainability_run_id)\n",
|
"model_explainability_run = Run(\n",
|
||||||
|
" experiment=experiment, run_id=model_explainability_run_id\n",
|
||||||
|
")\n",
|
||||||
"model_explainability_run.wait_for_completion()\n",
|
"model_explainability_run.wait_for_completion()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Get the best run object\n",
|
"# Get the best run object\n",
|
||||||
"best_run, fitted_model = remote_run.get_output()"
|
"best_run = remote_run.get_best_child()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -583,6 +581,7 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.automl.runtime.onnx_convert import OnnxConverter\n",
|
"from azureml.automl.runtime.onnx_convert import OnnxConverter\n",
|
||||||
|
"\n",
|
||||||
"onnx_fl_path = \"./best_model.onnx\"\n",
|
"onnx_fl_path = \"./best_model.onnx\"\n",
|
||||||
"OnnxConverter.save_onnx_model(onnx_mdl, onnx_fl_path)"
|
"OnnxConverter.save_onnx_model(onnx_mdl, onnx_fl_path)"
|
||||||
]
|
]
|
||||||
@@ -605,33 +604,31 @@
|
|||||||
"from azureml.automl.core.onnx_convert import OnnxConvertConstants\n",
|
"from azureml.automl.core.onnx_convert import OnnxConvertConstants\n",
|
||||||
"from azureml.train.automl import constants\n",
|
"from azureml.train.automl import constants\n",
|
||||||
"\n",
|
"\n",
|
||||||
"if sys.version_info < OnnxConvertConstants.OnnxIncompatiblePythonVersion:\n",
|
|
||||||
" python_version_compatible = True\n",
|
|
||||||
"else:\n",
|
|
||||||
" python_version_compatible = False\n",
|
|
||||||
"\n",
|
|
||||||
"import onnxruntime\n",
|
|
||||||
"from azureml.automl.runtime.onnx_convert import OnnxInferenceHelper\n",
|
"from azureml.automl.runtime.onnx_convert import OnnxInferenceHelper\n",
|
||||||
"\n",
|
"\n",
|
||||||
"def get_onnx_res(run):\n",
|
|
||||||
" res_path = 'onnx_resource.json'\n",
|
|
||||||
" run.download_file(name=constants.MODEL_RESOURCE_PATH_ONNX, output_file_path=res_path)\n",
|
|
||||||
" with open(res_path) as f:\n",
|
|
||||||
" onnx_res = json.load(f)\n",
|
|
||||||
" return onnx_res\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"if python_version_compatible:\n",
|
"def get_onnx_res(run):\n",
|
||||||
|
" res_path = \"onnx_resource.json\"\n",
|
||||||
|
" run.download_file(\n",
|
||||||
|
" name=constants.MODEL_RESOURCE_PATH_ONNX, output_file_path=res_path\n",
|
||||||
|
" )\n",
|
||||||
|
" with open(res_path) as f:\n",
|
||||||
|
" result = json.load(f)\n",
|
||||||
|
" return result\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"if sys.version_info < OnnxConvertConstants.OnnxIncompatiblePythonVersion:\n",
|
||||||
" test_df = test_dataset.to_pandas_dataframe()\n",
|
" test_df = test_dataset.to_pandas_dataframe()\n",
|
||||||
" mdl_bytes = onnx_mdl.SerializeToString()\n",
|
" mdl_bytes = onnx_mdl.SerializeToString()\n",
|
||||||
" onnx_res = get_onnx_res(best_run)\n",
|
" onnx_result = get_onnx_res(best_run)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" onnxrt_helper = OnnxInferenceHelper(mdl_bytes, onnx_res)\n",
|
" onnxrt_helper = OnnxInferenceHelper(mdl_bytes, onnx_result)\n",
|
||||||
" pred_onnx, pred_prob_onnx = onnxrt_helper.predict(test_df)\n",
|
" pred_onnx, pred_prob_onnx = onnxrt_helper.predict(test_df)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" print(pred_onnx)\n",
|
" print(pred_onnx)\n",
|
||||||
" print(pred_prob_onnx)\n",
|
" print(pred_prob_onnx)\n",
|
||||||
"else:\n",
|
"else:\n",
|
||||||
" print('Please use Python version 3.6 or 3.7 to run the inference helper.')"
|
" print(\"Please use Python version 3.6 or 3.7 to run the inference helper.\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -642,7 +639,16 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"### Retrieve the Best Model\n",
|
"### Retrieve the Best Model\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
|
"Below we select the best pipeline from our iterations. The `get_best_child` method returns the Run object for the best model based on the default primary metric. There are additional flags that can be passed to the method if we want to retrieve the best Run based on any of the other supported metrics, or if we are just interested in the best run among the ONNX compatible runs. As always, you can execute `??remote_run.get_best_child` in a new cell to view the source or docs for the function."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"??remote_run.get_best_child"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -662,7 +668,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"best_run, fitted_model = remote_run.get_output()"
|
"best_run = remote_run.get_best_child()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -671,11 +677,11 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"model_name = best_run.properties['model_name']\n",
|
"model_name = best_run.properties[\"model_name\"]\n",
|
||||||
"\n",
|
"\n",
|
||||||
"script_file_name = 'inference/score.py'\n",
|
"script_file_name = \"inference/score.py\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"best_run.download_file('outputs/scoring_file_v_1_0_0.py', 'inference/score.py')"
|
"best_run.download_file(\"outputs/scoring_file_v_1_0_0.py\", \"inference/score.py\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -692,11 +698,15 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"description = 'AutoML Model trained on bank marketing data to predict if a client will subscribe to a term deposit'\n",
|
"description = \"AutoML Model trained on bank marketing data to predict if a client will subscribe to a term deposit\"\n",
|
||||||
"tags = None\n",
|
"tags = None\n",
|
||||||
"model = remote_run.register_model(model_name = model_name, description = description, tags = tags)\n",
|
"model = remote_run.register_model(\n",
|
||||||
|
" model_name=model_name, description=description, tags=tags\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print(remote_run.model_id) # This will be written to the script file later in the notebook."
|
"print(\n",
|
||||||
|
" remote_run.model_id\n",
|
||||||
|
") # This will be written to the script file later in the notebook."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -718,14 +728,18 @@
|
|||||||
"from azureml.core.model import Model\n",
|
"from azureml.core.model import Model\n",
|
||||||
"from azureml.core.environment import Environment\n",
|
"from azureml.core.environment import Environment\n",
|
||||||
"\n",
|
"\n",
|
||||||
"inference_config = InferenceConfig(entry_script=script_file_name)\n",
|
"inference_config = InferenceConfig(\n",
|
||||||
|
" environment=best_run.get_environment(), entry_script=script_file_name\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1, \n",
|
"aciconfig = AciWebservice.deploy_configuration(\n",
|
||||||
" memory_gb = 1, \n",
|
" cpu_cores=2,\n",
|
||||||
" tags = {'area': \"bmData\", 'type': \"automl_classification\"}, \n",
|
" memory_gb=2,\n",
|
||||||
" description = 'sample service for Automl Classification')\n",
|
" tags={\"area\": \"bmData\", \"type\": \"automl_classification\"},\n",
|
||||||
|
" description=\"sample service for Automl Classification\",\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"aci_service_name = 'automl-sample-bankmarketing-all'\n",
|
"aci_service_name = model_name.lower()\n",
|
||||||
"print(aci_service_name)\n",
|
"print(aci_service_name)\n",
|
||||||
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
|
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
|
||||||
"aci_service.wait_for_deployment(True)\n",
|
"aci_service.wait_for_deployment(True)\n",
|
||||||
@@ -747,7 +761,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"#aci_service.get_logs()"
|
"# aci_service.get_logs()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -777,8 +791,8 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"X_test = test_dataset.drop_columns(columns=['y'])\n",
|
"X_test = test_dataset.drop_columns(columns=[\"y\"])\n",
|
||||||
"y_test = test_dataset.keep_columns(columns=['y'], validate=True)\n",
|
"y_test = test_dataset.keep_columns(columns=[\"y\"], validate=True)\n",
|
||||||
"test_dataset.take(5).to_pandas_dataframe()"
|
"test_dataset.take(5).to_pandas_dataframe()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -798,16 +812,15 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import json\n",
|
|
||||||
"import requests\n",
|
"import requests\n",
|
||||||
"\n",
|
"\n",
|
||||||
"X_test_json = X_test.to_json(orient='records')\n",
|
"X_test_json = X_test.to_json(orient=\"records\")\n",
|
||||||
"data = \"{\\\"data\\\": \" + X_test_json +\"}\"\n",
|
"data = '{\"data\": ' + X_test_json + \"}\"\n",
|
||||||
"headers = {'Content-Type': 'application/json'}\n",
|
"headers = {\"Content-Type\": \"application/json\"}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"resp = requests.post(aci_service.scoring_uri, data, headers=headers)\n",
|
"resp = requests.post(aci_service.scoring_uri, data, headers=headers)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"y_pred = json.loads(json.loads(resp.text))['result']"
|
"y_pred = json.loads(json.loads(resp.text))[\"result\"]"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -817,7 +830,7 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"actual = array(y_test)\n",
|
"actual = array(y_test)\n",
|
||||||
"actual = actual[:,0]\n",
|
"actual = actual[:, 0]\n",
|
||||||
"print(len(y_pred), \" \", len(actual))"
|
"print(len(y_pred), \" \", len(actual))"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -838,23 +851,28 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"%matplotlib notebook\n",
|
"%matplotlib notebook\n",
|
||||||
"from sklearn.metrics import confusion_matrix\n",
|
"from sklearn.metrics import confusion_matrix\n",
|
||||||
"import numpy as np\n",
|
|
||||||
"import itertools\n",
|
"import itertools\n",
|
||||||
"\n",
|
"\n",
|
||||||
"cf =confusion_matrix(actual,y_pred)\n",
|
"cf = confusion_matrix(actual, y_pred)\n",
|
||||||
"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n",
|
"plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
|
||||||
"plt.colorbar()\n",
|
"plt.colorbar()\n",
|
||||||
"plt.title('Confusion Matrix')\n",
|
"plt.title(\"Confusion Matrix\")\n",
|
||||||
"plt.xlabel('Predicted')\n",
|
"plt.xlabel(\"Predicted\")\n",
|
||||||
"plt.ylabel('Actual')\n",
|
"plt.ylabel(\"Actual\")\n",
|
||||||
"class_labels = ['no','yes']\n",
|
"class_labels = [\"no\", \"yes\"]\n",
|
||||||
"tick_marks = np.arange(len(class_labels))\n",
|
"tick_marks = np.arange(len(class_labels))\n",
|
||||||
"plt.xticks(tick_marks,class_labels)\n",
|
"plt.xticks(tick_marks, class_labels)\n",
|
||||||
"plt.yticks([-0.5,0,1,1.5],['','no','yes',''])\n",
|
"plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"no\", \"yes\", \"\"])\n",
|
||||||
"# plotting text value inside cells\n",
|
"# plotting text value inside cells\n",
|
||||||
"thresh = cf.max() / 2.\n",
|
"thresh = cf.max() / 2.0\n",
|
||||||
"for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])):\n",
|
"for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
|
||||||
" plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n",
|
" plt.text(\n",
|
||||||
|
" j,\n",
|
||||||
|
" i,\n",
|
||||||
|
" format(cf[i, j], \"d\"),\n",
|
||||||
|
" horizontalalignment=\"center\",\n",
|
||||||
|
" color=\"white\" if cf[i, j] > thresh else \"black\",\n",
|
||||||
|
" )\n",
|
||||||
"plt.show()"
|
"plt.show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -876,6 +894,142 @@
|
|||||||
"aci_service.delete()"
|
"aci_service.delete()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Using the auto generated model training code for retraining on new data\n",
|
||||||
|
"\n",
|
||||||
|
"Because we enabled code generation when the original experiment was created, we now have access to the code that was used to generate any of the AutoML tried models. Below we'll be using the generated training script of the best model to retrain on a new dataset.\n",
|
||||||
|
"\n",
|
||||||
|
"For this demo, we'll begin by creating new retraining dataset by combining the Train & Validation datasets that were used in the original experiment."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"original_train_data = pd.read_csv(\n",
|
||||||
|
" \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"valid_data = pd.read_csv(\n",
|
||||||
|
" \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_validate.csv\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# we'll emulate an updated dataset for retraining by combining the Train & Validation datasets into a new one\n",
|
||||||
|
"retrain_pd = pd.concat([original_train_data, valid_data])\n",
|
||||||
|
"retrain_pd.to_csv(\"data/retrain_data.csv\", index=False)\n",
|
||||||
|
"ds.upload_files(\n",
|
||||||
|
" files=[\"data/retrain_data.csv\"],\n",
|
||||||
|
" target_path=\"bankmarketing/\",\n",
|
||||||
|
" overwrite=True,\n",
|
||||||
|
" show_progress=True,\n",
|
||||||
|
")\n",
|
||||||
|
"retrain_dataset = Dataset.Tabular.from_delimited_files(\n",
|
||||||
|
" path=ds.path(\"bankmarketing/retrain_data.csv\")\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# after creating and uploading the retraining dataset, let's register it with the workspace for reuse\n",
|
||||||
|
"retrain_dataset = retrain_dataset.register(\n",
|
||||||
|
" workspace=ws,\n",
|
||||||
|
" name=\"Bankmarketing_retrain\",\n",
|
||||||
|
" description=\"Updated training dataset, includes validation data\",\n",
|
||||||
|
" create_new_version=True,\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Next, we'll download the generated script for the best run and use it for retraining. For more advanced scenarios, you can customize the training script as you need: change the featurization pipeline, change the learner algorithm or its hyperparameters, etc. \n",
|
||||||
|
"\n",
|
||||||
|
"For this exercise, we'll leave the script as it was generated."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# download the autogenerated training script into the generated_code folder\n",
|
||||||
|
"best_run.download_file(\n",
|
||||||
|
" \"outputs/generated_code/script.py\", \"generated_code/training_script.py\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# view the contents of the autogenerated training script\n",
|
||||||
|
"! cat generated_code/training_script.py"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import uuid\n",
|
||||||
|
"from azureml.core import ScriptRunConfig\n",
|
||||||
|
"from azureml._restclient.models import RunTypeV2\n",
|
||||||
|
"from azureml._restclient.models.create_run_dto import CreateRunDto\n",
|
||||||
|
"from azureml._restclient.run_client import RunClient\n",
|
||||||
|
"\n",
|
||||||
|
"codegen_runid = str(uuid.uuid4())\n",
|
||||||
|
"client = RunClient(\n",
|
||||||
|
" experiment.workspace.service_context,\n",
|
||||||
|
" experiment.name,\n",
|
||||||
|
" codegen_runid,\n",
|
||||||
|
" experiment_id=experiment.id,\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# override the training_dataset_id to point to our new retraining dataset we just registered above\n",
|
||||||
|
"dataset_arguments = [\"--training_dataset_id\", retrain_dataset.id]\n",
|
||||||
|
"\n",
|
||||||
|
"# create the retraining run as a child of the AutoML generated training run\n",
|
||||||
|
"create_run_dto = CreateRunDto(\n",
|
||||||
|
" run_id=codegen_runid,\n",
|
||||||
|
" parent_run_id=best_run.id,\n",
|
||||||
|
" description=\"AutoML Codegen Script Run using an updated training dataset\",\n",
|
||||||
|
" target=cpu_cluster_name,\n",
|
||||||
|
" run_type_v2=RunTypeV2(orchestrator=\"Execution\", traits=[\"automl-codegen\"]),\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# the script for retraining run is pointing to the AutoML generated script\n",
|
||||||
|
"src = ScriptRunConfig(\n",
|
||||||
|
" source_directory=\"generated_code\",\n",
|
||||||
|
" script=\"training_script.py\",\n",
|
||||||
|
" arguments=dataset_arguments,\n",
|
||||||
|
" compute_target=cpu_cluster_name,\n",
|
||||||
|
" environment=best_run.get_environment(),\n",
|
||||||
|
")\n",
|
||||||
|
"run_dto = client.create_run(run_id=codegen_runid, create_run_dto=create_run_dto)\n",
|
||||||
|
"\n",
|
||||||
|
"# submit the experiment\n",
|
||||||
|
"retraining_run = experiment.submit(config=src, run_id=codegen_runid)\n",
|
||||||
|
"retraining_run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"After the run completes, we can get download/test/deploy to the model it has built."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"retraining_run.wait_for_completion()\n",
|
||||||
|
"\n",
|
||||||
|
"retraining_run.download_file(\"outputs/model.pkl\", \"generated_code/model.pkl\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -918,10 +1072,13 @@
|
|||||||
],
|
],
|
||||||
"friendly_name": "Automated ML run with basic edition features.",
|
"friendly_name": "Automated ML run with basic edition features.",
|
||||||
"index_order": 5,
|
"index_order": 5,
|
||||||
|
"kernel_info": {
|
||||||
|
"name": "python3-azureml"
|
||||||
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3.6",
|
"display_name": "Python 3.8 - AzureML",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python36"
|
"name": "python38-azureml"
|
||||||
},
|
},
|
||||||
"language_info": {
|
"language_info": {
|
||||||
"codemirror_mode": {
|
"codemirror_mode": {
|
||||||
@@ -933,7 +1090,10 @@
|
|||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.6.7"
|
"version": "3.8.12"
|
||||||
|
},
|
||||||
|
"nteract": {
|
||||||
|
"version": "nteract-front-end@1.0.0"
|
||||||
},
|
},
|
||||||
"tags": [
|
"tags": [
|
||||||
"featurization",
|
"featurization",
|
||||||
@@ -944,5 +1104,5 @@
|
|||||||
"task": "Classification"
|
"task": "Classification"
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
"nbformat_minor": 2
|
"nbformat_minor": 1
|
||||||
}
|
}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: auto-ml-classification-bank-marketing-all-features
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -87,16 +87,6 @@
|
|||||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
|
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
@@ -106,18 +96,19 @@
|
|||||||
"ws = Workspace.from_config()\n",
|
"ws = Workspace.from_config()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# choose a name for experiment\n",
|
"# choose a name for experiment\n",
|
||||||
"experiment_name = 'automl-classification-ccard-remote'\n",
|
"experiment_name = \"automl-classification-ccard-remote\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"experiment=Experiment(ws, experiment_name)\n",
|
"experiment = Experiment(ws, experiment_name)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"output = {}\n",
|
"output = {}\n",
|
||||||
"output['Subscription ID'] = ws.subscription_id\n",
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
"output['Workspace'] = ws.name\n",
|
"output[\"Workspace\"] = ws.name\n",
|
||||||
"output['Resource Group'] = ws.resource_group\n",
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
"output['Location'] = ws.location\n",
|
"output[\"Location\"] = ws.location\n",
|
||||||
"output['Experiment Name'] = experiment.name\n",
|
"output[\"Experiment Name\"] = experiment.name\n",
|
||||||
"pd.set_option('display.max_colwidth', -1)\n",
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
"outputDf.T"
|
"outputDf.T"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -127,6 +118,9 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"## Create or Attach existing AmlCompute\n",
|
"## Create or Attach existing AmlCompute\n",
|
||||||
"A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
"A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||||
|
"\n",
|
||||||
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||||
@@ -147,12 +141,12 @@
|
|||||||
"# Verify that cluster does not exist already\n",
|
"# Verify that cluster does not exist already\n",
|
||||||
"try:\n",
|
"try:\n",
|
||||||
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||||
" print('Found existing cluster, use it.')\n",
|
" print(\"Found existing cluster, use it.\")\n",
|
||||||
"except ComputeTargetException:\n",
|
"except ComputeTargetException:\n",
|
||||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||||
" max_nodes=6)\n",
|
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
|
||||||
|
" )\n",
|
||||||
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||||
"\n",
|
|
||||||
"compute_target.wait_for_completion(show_output=True)"
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -175,13 +169,15 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"name": "load-data"
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
|
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
|
||||||
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
|
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
|
||||||
"training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
|
"training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
|
||||||
"label_column_name = 'Class'"
|
"label_column_name = \"Class\""
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -207,25 +203,28 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"name": "automl-config"
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"automl_settings = {\n",
|
"automl_settings = {\n",
|
||||||
" \"n_cross_validations\": 3,\n",
|
" \"n_cross_validations\": 3,\n",
|
||||||
" \"primary_metric\": 'average_precision_score_weighted',\n",
|
" \"primary_metric\": \"average_precision_score_weighted\",\n",
|
||||||
" \"enable_early_stopping\": True,\n",
|
" \"enable_early_stopping\": True,\n",
|
||||||
" \"max_concurrent_iterations\": 2, # This is a limit for testing purpose, please increase it as per cluster size\n",
|
" \"max_concurrent_iterations\": 2, # This is a limit for testing purpose, please increase it as per cluster size\n",
|
||||||
" \"experiment_timeout_hours\": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible\n",
|
" \"experiment_timeout_hours\": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible\n",
|
||||||
" \"verbosity\": logging.INFO,\n",
|
" \"verbosity\": logging.INFO,\n",
|
||||||
"}\n",
|
"}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"automl_config = AutoMLConfig(task = 'classification',\n",
|
"automl_config = AutoMLConfig(\n",
|
||||||
" debug_log = 'automl_errors.log',\n",
|
" task=\"classification\",\n",
|
||||||
" compute_target = compute_target,\n",
|
" debug_log=\"automl_errors.log\",\n",
|
||||||
" training_data = training_data,\n",
|
" compute_target=compute_target,\n",
|
||||||
" label_column_name = label_column_name,\n",
|
" training_data=training_data,\n",
|
||||||
" **automl_settings\n",
|
" label_column_name=label_column_name,\n",
|
||||||
" )"
|
" **automl_settings,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -241,7 +240,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"remote_run = experiment.submit(automl_config, show_output = False)"
|
"remote_run = experiment.submit(automl_config, show_output=False)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -251,17 +250,8 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# If you need to retrieve a run that already started, use the following code\n",
|
"# If you need to retrieve a run that already started, use the following code\n",
|
||||||
"#from azureml.train.automl.run import AutoMLRun\n",
|
"# from azureml.train.automl.run import AutoMLRun\n",
|
||||||
"#remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
|
"# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"remote_run"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -293,6 +283,7 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.widgets import RunDetails\n",
|
"from azureml.widgets import RunDetails\n",
|
||||||
|
"\n",
|
||||||
"RunDetails(remote_run).show()"
|
"RunDetails(remote_run).show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -359,8 +350,12 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# convert the test data to dataframe\n",
|
"# convert the test data to dataframe\n",
|
||||||
"X_test_df = validation_data.drop_columns(columns=[label_column_name]).to_pandas_dataframe()\n",
|
"X_test_df = validation_data.drop_columns(\n",
|
||||||
"y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True).to_pandas_dataframe()"
|
" columns=[label_column_name]\n",
|
||||||
|
").to_pandas_dataframe()\n",
|
||||||
|
"y_test_df = validation_data.keep_columns(\n",
|
||||||
|
" columns=[label_column_name], validate=True\n",
|
||||||
|
").to_pandas_dataframe()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -394,20 +389,26 @@
|
|||||||
"import numpy as np\n",
|
"import numpy as np\n",
|
||||||
"import itertools\n",
|
"import itertools\n",
|
||||||
"\n",
|
"\n",
|
||||||
"cf =confusion_matrix(y_test_df.values,y_pred)\n",
|
"cf = confusion_matrix(y_test_df.values, y_pred)\n",
|
||||||
"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n",
|
"plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
|
||||||
"plt.colorbar()\n",
|
"plt.colorbar()\n",
|
||||||
"plt.title('Confusion Matrix')\n",
|
"plt.title(\"Confusion Matrix\")\n",
|
||||||
"plt.xlabel('Predicted')\n",
|
"plt.xlabel(\"Predicted\")\n",
|
||||||
"plt.ylabel('Actual')\n",
|
"plt.ylabel(\"Actual\")\n",
|
||||||
"class_labels = ['False','True']\n",
|
"class_labels = [\"False\", \"True\"]\n",
|
||||||
"tick_marks = np.arange(len(class_labels))\n",
|
"tick_marks = np.arange(len(class_labels))\n",
|
||||||
"plt.xticks(tick_marks,class_labels)\n",
|
"plt.xticks(tick_marks, class_labels)\n",
|
||||||
"plt.yticks([-0.5,0,1,1.5],['','False','True',''])\n",
|
"plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"False\", \"True\", \"\"])\n",
|
||||||
"# plotting text value inside cells\n",
|
"# plotting text value inside cells\n",
|
||||||
"thresh = cf.max() / 2.\n",
|
"thresh = cf.max() / 2.0\n",
|
||||||
"for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])):\n",
|
"for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
|
||||||
" plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n",
|
" plt.text(\n",
|
||||||
|
" j,\n",
|
||||||
|
" i,\n",
|
||||||
|
" format(cf[i, j], \"d\"),\n",
|
||||||
|
" horizontalalignment=\"center\",\n",
|
||||||
|
" color=\"white\" if cf[i, j] > thresh else \"black\",\n",
|
||||||
|
" )\n",
|
||||||
"plt.show()"
|
"plt.show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -471,9 +472,9 @@
|
|||||||
"friendly_name": "Classification of credit card fraudulent transactions using Automated ML",
|
"friendly_name": "Classification of credit card fraudulent transactions using Automated ML",
|
||||||
"index_order": 5,
|
"index_order": 5,
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3.6",
|
"display_name": "Python 3.8 - AzureML",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python36"
|
"name": "python38-azureml"
|
||||||
},
|
},
|
||||||
"language_info": {
|
"language_info": {
|
||||||
"codemirror_mode": {
|
"codemirror_mode": {
|
||||||
|
|||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: auto-ml-classification-credit-card-fraud
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -1,589 +0,0 @@
|
|||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
|
||||||
"\n",
|
|
||||||
"Licensed under the MIT License."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
""
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# Automated Machine Learning\n",
|
|
||||||
"_**Text Classification Using Deep Learning**_\n",
|
|
||||||
"\n",
|
|
||||||
"## Contents\n",
|
|
||||||
"1. [Introduction](#Introduction)\n",
|
|
||||||
"1. [Setup](#Setup)\n",
|
|
||||||
"1. [Data](#Data)\n",
|
|
||||||
"1. [Train](#Train)\n",
|
|
||||||
"1. [Evaluate](#Evaluate)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Introduction\n",
|
|
||||||
"This notebook demonstrates classification with text data using deep learning in AutoML.\n",
|
|
||||||
"\n",
|
|
||||||
"AutoML highlights here include using deep neural networks (DNNs) to create embedded features from text data. Depending on the compute cluster the user provides, AutoML tried out Bidirectional Encoder Representations from Transformers (BERT) when a GPU compute is used, and Bidirectional Long-Short Term neural network (BiLSTM) when a CPU compute is used, thereby optimizing the choice of DNN for the uesr's setup.\n",
|
|
||||||
"\n",
|
|
||||||
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
|
|
||||||
"\n",
|
|
||||||
"Notebook synopsis:\n",
|
|
||||||
"\n",
|
|
||||||
"1. Creating an Experiment in an existing Workspace\n",
|
|
||||||
"2. Configuration and remote run of AutoML for a text dataset (20 Newsgroups dataset from scikit-learn) for classification\n",
|
|
||||||
"3. Registering the best model for future use\n",
|
|
||||||
"4. Evaluating the final model on a test set"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Setup"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import logging\n",
|
|
||||||
"import os\n",
|
|
||||||
"import shutil\n",
|
|
||||||
"\n",
|
|
||||||
"import pandas as pd\n",
|
|
||||||
"\n",
|
|
||||||
"import azureml.core\n",
|
|
||||||
"from azureml.core.experiment import Experiment\n",
|
|
||||||
"from azureml.core.workspace import Workspace\n",
|
|
||||||
"from azureml.core.dataset import Dataset\n",
|
|
||||||
"from azureml.core.compute import AmlCompute\n",
|
|
||||||
"from azureml.core.compute import ComputeTarget\n",
|
|
||||||
"from azureml.core.run import Run\n",
|
|
||||||
"from azureml.widgets import RunDetails\n",
|
|
||||||
"from azureml.core.model import Model \n",
|
|
||||||
"from helper import run_inference, get_result_df\n",
|
|
||||||
"from azureml.train.automl import AutoMLConfig\n",
|
|
||||||
"from sklearn.datasets import fetch_20newsgroups"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
|
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"As part of the setup you have already created a <b>Workspace</b>. To run AutoML, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"ws = Workspace.from_config()\n",
|
|
||||||
"\n",
|
|
||||||
"# Choose an experiment name.\n",
|
|
||||||
"experiment_name = 'automl-classification-text-dnn'\n",
|
|
||||||
"\n",
|
|
||||||
"experiment = Experiment(ws, experiment_name)\n",
|
|
||||||
"\n",
|
|
||||||
"output = {}\n",
|
|
||||||
"output['Subscription ID'] = ws.subscription_id\n",
|
|
||||||
"output['Workspace Name'] = ws.name\n",
|
|
||||||
"output['Resource Group'] = ws.resource_group\n",
|
|
||||||
"output['Location'] = ws.location\n",
|
|
||||||
"output['Experiment Name'] = experiment.name\n",
|
|
||||||
"pd.set_option('display.max_colwidth', -1)\n",
|
|
||||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
|
||||||
"outputDf.T"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Set up a compute cluster\n",
|
|
||||||
"This section uses a user-provided compute cluster (named \"dnntext-cluster\" in this example). If a cluster with this name does not exist in the user's workspace, the below code will create a new cluster. You can choose the parameters of the cluster as mentioned in the comments.\n",
|
|
||||||
"\n",
|
|
||||||
"Whether you provide/select a CPU or GPU cluster, AutoML will choose the appropriate DNN for that setup - BiLSTM or BERT text featurizer will be included in the candidate featurizers on CPU and GPU respectively. If your goal is to obtain the most accurate model, we recommend you use GPU clusters since BERT featurizers usually outperform BiLSTM featurizers."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
|
||||||
"from azureml.core.compute_target import ComputeTargetException\n",
|
|
||||||
"\n",
|
|
||||||
"num_nodes = 2\n",
|
|
||||||
"\n",
|
|
||||||
"# Choose a name for your cluster.\n",
|
|
||||||
"amlcompute_cluster_name = \"dnntext-cluster\"\n",
|
|
||||||
"\n",
|
|
||||||
"# Verify that cluster does not exist already\n",
|
|
||||||
"try:\n",
|
|
||||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
|
||||||
" print('Found existing cluster, use it.')\n",
|
|
||||||
"except ComputeTargetException:\n",
|
|
||||||
" compute_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_NC6\", # CPU for BiLSTM, such as \"STANDARD_D2_V2\" \n",
|
|
||||||
" # To use BERT (this is recommended for best performance), select a GPU such as \"STANDARD_NC6\" \n",
|
|
||||||
" # or similar GPU option\n",
|
|
||||||
" # available in your workspace\n",
|
|
||||||
" max_nodes = num_nodes)\n",
|
|
||||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
|
||||||
"\n",
|
|
||||||
"compute_target.wait_for_completion(show_output=True)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Get data\n",
|
|
||||||
"For this notebook we will use 20 Newsgroups data from scikit-learn. We filter the data to contain four classes and take a sample as training data. Please note that for accuracy improvement, more data is needed. For this notebook we provide a small-data example so that you can use this template to use with your larger sized data."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"data_dir = \"text-dnn-data\" # Local directory to store data\n",
|
|
||||||
"blobstore_datadir = data_dir # Blob store directory to store data in\n",
|
|
||||||
"target_column_name = 'y'\n",
|
|
||||||
"feature_column_name = 'X'\n",
|
|
||||||
"\n",
|
|
||||||
"def get_20newsgroups_data():\n",
|
|
||||||
" '''Fetches 20 Newsgroups data from scikit-learn\n",
|
|
||||||
" Returns them in form of pandas dataframes\n",
|
|
||||||
" '''\n",
|
|
||||||
" remove = ('headers', 'footers', 'quotes')\n",
|
|
||||||
" categories = [\n",
|
|
||||||
" 'rec.sport.baseball',\n",
|
|
||||||
" 'rec.sport.hockey',\n",
|
|
||||||
" 'comp.graphics',\n",
|
|
||||||
" 'sci.space',\n",
|
|
||||||
" ]\n",
|
|
||||||
"\n",
|
|
||||||
" data = fetch_20newsgroups(subset = 'train', categories = categories,\n",
|
|
||||||
" shuffle = True, random_state = 42,\n",
|
|
||||||
" remove = remove)\n",
|
|
||||||
" data = pd.DataFrame({feature_column_name: data.data, target_column_name: data.target})\n",
|
|
||||||
"\n",
|
|
||||||
" data_train = data[:200]\n",
|
|
||||||
" data_test = data[200:300] \n",
|
|
||||||
"\n",
|
|
||||||
" data_train = remove_blanks_20news(data_train, feature_column_name, target_column_name)\n",
|
|
||||||
" data_test = remove_blanks_20news(data_test, feature_column_name, target_column_name)\n",
|
|
||||||
" \n",
|
|
||||||
" return data_train, data_test\n",
|
|
||||||
" \n",
|
|
||||||
"def remove_blanks_20news(data, feature_column_name, target_column_name):\n",
|
|
||||||
" \n",
|
|
||||||
" data[feature_column_name] = data[feature_column_name].replace(r'\\n', ' ', regex=True).apply(lambda x: x.strip())\n",
|
|
||||||
" data = data[data[feature_column_name] != '']\n",
|
|
||||||
" \n",
|
|
||||||
" return data"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Fetch data and upload to datastore for use in training"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"data_train, data_test = get_20newsgroups_data()\n",
|
|
||||||
"\n",
|
|
||||||
"if not os.path.isdir(data_dir):\n",
|
|
||||||
" os.mkdir(data_dir)\n",
|
|
||||||
" \n",
|
|
||||||
"train_data_fname = data_dir + '/train_data.csv'\n",
|
|
||||||
"test_data_fname = data_dir + '/test_data.csv'\n",
|
|
||||||
"\n",
|
|
||||||
"data_train.to_csv(train_data_fname, index=False)\n",
|
|
||||||
"data_test.to_csv(test_data_fname, index=False)\n",
|
|
||||||
"\n",
|
|
||||||
"datastore = ws.get_default_datastore()\n",
|
|
||||||
"datastore.upload(src_dir=data_dir, target_path=blobstore_datadir,\n",
|
|
||||||
" overwrite=True)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, blobstore_datadir + '/train_data.csv')])"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Prepare AutoML run"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"This notebook uses the blocked_models parameter to exclude some models that can take a longer time to train on some text datasets. You can choose to remove models from the blocked_models list but you may need to increase the experiment_timeout_hours parameter value to get results."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"automl_settings = {\n",
|
|
||||||
" \"experiment_timeout_minutes\": 20,\n",
|
|
||||||
" \"primary_metric\": 'accuracy',\n",
|
|
||||||
" \"max_concurrent_iterations\": num_nodes, \n",
|
|
||||||
" \"max_cores_per_iteration\": -1,\n",
|
|
||||||
" \"enable_dnn\": True,\n",
|
|
||||||
" \"enable_early_stopping\": True,\n",
|
|
||||||
" \"validation_size\": 0.3,\n",
|
|
||||||
" \"verbosity\": logging.INFO,\n",
|
|
||||||
" \"enable_voting_ensemble\": False,\n",
|
|
||||||
" \"enable_stack_ensemble\": False,\n",
|
|
||||||
"}\n",
|
|
||||||
"\n",
|
|
||||||
"automl_config = AutoMLConfig(task = 'classification',\n",
|
|
||||||
" debug_log = 'automl_errors.log',\n",
|
|
||||||
" compute_target=compute_target,\n",
|
|
||||||
" training_data=train_dataset,\n",
|
|
||||||
" label_column_name=target_column_name,\n",
|
|
||||||
" blocked_models = ['LightGBM', 'XGBoostClassifier'],\n",
|
|
||||||
" **automl_settings\n",
|
|
||||||
" )"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Submit AutoML Run"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"automl_run = experiment.submit(automl_config, show_output=True)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"automl_run"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Displaying the run objects gives you links to the visual tools in the Azure Portal. Go try them!"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Retrieve the Best Model\n",
|
|
||||||
"Below we select the best model pipeline from our iterations, use it to test on test data on the same compute cluster."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"You can test the model locally to get a feel of the input/output. When the model contains BERT, this step will require pytorch and pytorch-transformers installed in your local environment. The exact versions of these packages can be found in the **automl_env.yml** file located in the local copy of your MachineLearningNotebooks folder here:\n",
|
|
||||||
"MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl_env.yml"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"best_run, fitted_model = automl_run.get_output()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"You can now see what text transformations are used to convert text data to features for this dataset, including deep learning transformations based on BiLSTM or Transformer (BERT is one implementation of a Transformer) models."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"text_transformations_used = []\n",
|
|
||||||
"for column_group in fitted_model.named_steps['datatransformer'].get_featurization_summary():\n",
|
|
||||||
" text_transformations_used.extend(column_group['Transformations'])\n",
|
|
||||||
"text_transformations_used"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Registering the best model\n",
|
|
||||||
"We now register the best fitted model from the AutoML Run for use in future deployments. "
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Get results stats, extract the best model from AutoML run, download and register the resultant best model"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"summary_df = get_result_df(automl_run)\n",
|
|
||||||
"best_dnn_run_id = summary_df['run_id'].iloc[0]\n",
|
|
||||||
"best_dnn_run = Run(experiment, best_dnn_run_id)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model_dir = 'Model' # Local folder where the model will be stored temporarily\n",
|
|
||||||
"if not os.path.isdir(model_dir):\n",
|
|
||||||
" os.mkdir(model_dir)\n",
|
|
||||||
" \n",
|
|
||||||
"best_dnn_run.download_file('outputs/model.pkl', model_dir + '/model.pkl')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Register the model in your Azure Machine Learning Workspace. If you previously registered a model, please make sure to delete it so as to replace it with this new model."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Register the model\n",
|
|
||||||
"model_name = 'textDNN-20News'\n",
|
|
||||||
"model = Model.register(model_path = model_dir + '/model.pkl',\n",
|
|
||||||
" model_name = model_name,\n",
|
|
||||||
" tags=None,\n",
|
|
||||||
" workspace=ws)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Evaluate on Test Data"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"We now use the best fitted model from the AutoML Run to make predictions on the test set. \n",
|
|
||||||
"\n",
|
|
||||||
"Test set schema should match that of the training set."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, blobstore_datadir + '/test_data.csv')])\n",
|
|
||||||
"\n",
|
|
||||||
"# preview the first 3 rows of the dataset\n",
|
|
||||||
"test_dataset.take(3).to_pandas_dataframe()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"test_experiment = Experiment(ws, experiment_name + \"_test\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"script_folder = os.path.join(os.getcwd(), 'inference')\n",
|
|
||||||
"os.makedirs(script_folder, exist_ok=True)\n",
|
|
||||||
"shutil.copy('infer.py', script_folder)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run,\n",
|
|
||||||
" train_dataset, test_dataset, target_column_name, model_name)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Display computed metrics"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"test_run"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"RunDetails(test_run).show()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"test_run.wait_for_completion()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"pd.Series(test_run.get_metrics())"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"authors": [
|
|
||||||
{
|
|
||||||
"name": "anshirga"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"compute": [
|
|
||||||
"AML Compute"
|
|
||||||
],
|
|
||||||
"datasets": [
|
|
||||||
"None"
|
|
||||||
],
|
|
||||||
"deployment": [
|
|
||||||
"None"
|
|
||||||
],
|
|
||||||
"exclude_from_index": false,
|
|
||||||
"framework": [
|
|
||||||
"None"
|
|
||||||
],
|
|
||||||
"friendly_name": "DNN Text Featurization",
|
|
||||||
"index_order": 2,
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3.6",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python36"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.6.7"
|
|
||||||
},
|
|
||||||
"tags": [
|
|
||||||
"None"
|
|
||||||
],
|
|
||||||
"task": "Text featurization using DNNs for classification"
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 2
|
|
||||||
}
|
|
||||||
@@ -1,56 +0,0 @@
|
|||||||
import pandas as pd
|
|
||||||
from azureml.core import Environment
|
|
||||||
from azureml.train.estimator import Estimator
|
|
||||||
from azureml.core.run import Run
|
|
||||||
|
|
||||||
|
|
||||||
def run_inference(test_experiment, compute_target, script_folder, train_run,
|
|
||||||
train_dataset, test_dataset, target_column_name, model_name):
|
|
||||||
|
|
||||||
inference_env = train_run.get_environment()
|
|
||||||
|
|
||||||
est = Estimator(source_directory=script_folder,
|
|
||||||
entry_script='infer.py',
|
|
||||||
script_params={
|
|
||||||
'--target_column_name': target_column_name,
|
|
||||||
'--model_name': model_name
|
|
||||||
},
|
|
||||||
inputs=[
|
|
||||||
train_dataset.as_named_input('train_data'),
|
|
||||||
test_dataset.as_named_input('test_data')
|
|
||||||
],
|
|
||||||
compute_target=compute_target,
|
|
||||||
environment_definition=inference_env)
|
|
||||||
|
|
||||||
run = test_experiment.submit(
|
|
||||||
est, tags={
|
|
||||||
'training_run_id': train_run.id,
|
|
||||||
'run_algorithm': train_run.properties['run_algorithm'],
|
|
||||||
'valid_score': train_run.properties['score'],
|
|
||||||
'primary_metric': train_run.properties['primary_metric']
|
|
||||||
})
|
|
||||||
|
|
||||||
run.log("run_algorithm", run.tags['run_algorithm'])
|
|
||||||
return run
|
|
||||||
|
|
||||||
|
|
||||||
def get_result_df(remote_run):
|
|
||||||
|
|
||||||
children = list(remote_run.get_children(recursive=True))
|
|
||||||
summary_df = pd.DataFrame(index=['run_id', 'run_algorithm',
|
|
||||||
'primary_metric', 'Score'])
|
|
||||||
goal_minimize = False
|
|
||||||
for run in children:
|
|
||||||
if('run_algorithm' in run.properties and 'score' in run.properties):
|
|
||||||
summary_df[run.id] = [run.id, run.properties['run_algorithm'],
|
|
||||||
run.properties['primary_metric'],
|
|
||||||
float(run.properties['score'])]
|
|
||||||
if('goal' in run.properties):
|
|
||||||
goal_minimize = run.properties['goal'].split('_')[-1] == 'min'
|
|
||||||
|
|
||||||
summary_df = summary_df.T.sort_values(
|
|
||||||
'Score',
|
|
||||||
ascending=goal_minimize).drop_duplicates(['run_algorithm'])
|
|
||||||
summary_df = summary_df.set_index('run_algorithm')
|
|
||||||
|
|
||||||
return summary_df
|
|
||||||
@@ -1,60 +0,0 @@
|
|||||||
import argparse
|
|
||||||
|
|
||||||
import numpy as np
|
|
||||||
|
|
||||||
from sklearn.externals import joblib
|
|
||||||
|
|
||||||
from azureml.automl.runtime.shared.score import scoring, constants
|
|
||||||
from azureml.core import Run
|
|
||||||
from azureml.core.model import Model
|
|
||||||
|
|
||||||
|
|
||||||
parser = argparse.ArgumentParser()
|
|
||||||
parser.add_argument(
|
|
||||||
'--target_column_name', type=str, dest='target_column_name',
|
|
||||||
help='Target Column Name')
|
|
||||||
parser.add_argument(
|
|
||||||
'--model_name', type=str, dest='model_name',
|
|
||||||
help='Name of registered model')
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
target_column_name = args.target_column_name
|
|
||||||
model_name = args.model_name
|
|
||||||
|
|
||||||
print('args passed are: ')
|
|
||||||
print('Target column name: ', target_column_name)
|
|
||||||
print('Name of registered model: ', model_name)
|
|
||||||
|
|
||||||
model_path = Model.get_model_path(model_name)
|
|
||||||
# deserialize the model file back into a sklearn model
|
|
||||||
model = joblib.load(model_path)
|
|
||||||
|
|
||||||
run = Run.get_context()
|
|
||||||
# get input dataset by name
|
|
||||||
test_dataset = run.input_datasets['test_data']
|
|
||||||
train_dataset = run.input_datasets['train_data']
|
|
||||||
|
|
||||||
X_test_df = test_dataset.drop_columns(columns=[target_column_name]) \
|
|
||||||
.to_pandas_dataframe()
|
|
||||||
y_test_df = test_dataset.with_timestamp_columns(None) \
|
|
||||||
.keep_columns(columns=[target_column_name]) \
|
|
||||||
.to_pandas_dataframe()
|
|
||||||
y_train_df = test_dataset.with_timestamp_columns(None) \
|
|
||||||
.keep_columns(columns=[target_column_name]) \
|
|
||||||
.to_pandas_dataframe()
|
|
||||||
|
|
||||||
predicted = model.predict_proba(X_test_df)
|
|
||||||
|
|
||||||
# Use the AutoML scoring module
|
|
||||||
class_labels = np.unique(np.concatenate((y_train_df.values, y_test_df.values)))
|
|
||||||
train_labels = model.classes_
|
|
||||||
classification_metrics = list(constants.CLASSIFICATION_SCALAR_SET)
|
|
||||||
scores = scoring.score_classification(y_test_df.values, predicted,
|
|
||||||
classification_metrics,
|
|
||||||
class_labels, train_labels)
|
|
||||||
|
|
||||||
print("scores:")
|
|
||||||
print(scores)
|
|
||||||
|
|
||||||
for key, value in scores.items():
|
|
||||||
run.log(key, value)
|
|
||||||
@@ -4,7 +4,8 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
"Licensed under the MIT License."
|
"Licensed under the MIT License."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -12,7 +13,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
""
|
""
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -75,16 +76,6 @@
|
|||||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
|
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -118,17 +109,18 @@
|
|||||||
"dstor = ws.get_default_datastore()\n",
|
"dstor = ws.get_default_datastore()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Choose a name for the run history container in the workspace.\n",
|
"# Choose a name for the run history container in the workspace.\n",
|
||||||
"experiment_name = 'retrain-noaaweather'\n",
|
"experiment_name = \"retrain-noaaweather\"\n",
|
||||||
"experiment = Experiment(ws, experiment_name)\n",
|
"experiment = Experiment(ws, experiment_name)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"output = {}\n",
|
"output = {}\n",
|
||||||
"output['Subscription ID'] = ws.subscription_id\n",
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
"output['Workspace'] = ws.name\n",
|
"output[\"Workspace\"] = ws.name\n",
|
||||||
"output['Resource Group'] = ws.resource_group\n",
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
"output['Location'] = ws.location\n",
|
"output[\"Location\"] = ws.location\n",
|
||||||
"output['Run History Name'] = experiment_name\n",
|
"output[\"Run History Name\"] = experiment_name\n",
|
||||||
"pd.set_option('display.max_colwidth', -1)\n",
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
"outputDf.T"
|
"outputDf.T"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -141,6 +133,9 @@
|
|||||||
"#### Create or Attach existing AmlCompute\n",
|
"#### Create or Attach existing AmlCompute\n",
|
||||||
"\n",
|
"\n",
|
||||||
"You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
"You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||||
|
"\n",
|
||||||
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||||
@@ -161,12 +156,12 @@
|
|||||||
"# Verify that cluster does not exist already\n",
|
"# Verify that cluster does not exist already\n",
|
||||||
"try:\n",
|
"try:\n",
|
||||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||||
" print('Found existing cluster, use it.')\n",
|
" print(\"Found existing cluster, use it.\")\n",
|
||||||
"except ComputeTargetException:\n",
|
"except ComputeTargetException:\n",
|
||||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
|
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||||
" max_nodes=4)\n",
|
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
|
||||||
|
" )\n",
|
||||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||||
"\n",
|
|
||||||
"compute_target.wait_for_completion(show_output=True)"
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -193,12 +188,19 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"conda_run_config.environment.docker.enabled = True\n",
|
"conda_run_config.environment.docker.enabled = True\n",
|
||||||
"\n",
|
"\n",
|
||||||
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'applicationinsights', 'azureml-opendatasets', 'azureml-defaults'], \n",
|
"cd = CondaDependencies.create(\n",
|
||||||
" conda_packages=['numpy==1.16.2'], \n",
|
" pip_packages=[\n",
|
||||||
" pin_sdk_version=False)\n",
|
" \"azureml-sdk[automl]\",\n",
|
||||||
|
" \"applicationinsights\",\n",
|
||||||
|
" \"azureml-opendatasets\",\n",
|
||||||
|
" \"azureml-defaults\",\n",
|
||||||
|
" ],\n",
|
||||||
|
" conda_packages=[\"numpy==1.19.5\"],\n",
|
||||||
|
" pin_sdk_version=False,\n",
|
||||||
|
")\n",
|
||||||
"conda_run_config.environment.python.conda_dependencies = cd\n",
|
"conda_run_config.environment.python.conda_dependencies = cd\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print('run config is ready')"
|
"print(\"run config is ready\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -215,7 +217,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# The name and target column of the Dataset to create \n",
|
"# The name and target column of the Dataset to create\n",
|
||||||
"dataset = \"NOAA-Weather-DS4\"\n",
|
"dataset = \"NOAA-Weather-DS4\"\n",
|
||||||
"target_column_name = \"temperature\""
|
"target_column_name = \"temperature\""
|
||||||
]
|
]
|
||||||
@@ -239,12 +241,14 @@
|
|||||||
"from azureml.pipeline.steps import PythonScriptStep\n",
|
"from azureml.pipeline.steps import PythonScriptStep\n",
|
||||||
"\n",
|
"\n",
|
||||||
"ds_name = PipelineParameter(name=\"ds_name\", default_value=dataset)\n",
|
"ds_name = PipelineParameter(name=\"ds_name\", default_value=dataset)\n",
|
||||||
"upload_data_step = PythonScriptStep(script_name=\"upload_weather_data.py\", \n",
|
"upload_data_step = PythonScriptStep(\n",
|
||||||
" allow_reuse=False,\n",
|
" script_name=\"upload_weather_data.py\",\n",
|
||||||
" name=\"upload_weather_data\",\n",
|
" allow_reuse=False,\n",
|
||||||
" arguments=[\"--ds_name\", ds_name],\n",
|
" name=\"upload_weather_data\",\n",
|
||||||
" compute_target=compute_target, \n",
|
" arguments=[\"--ds_name\", ds_name],\n",
|
||||||
" runconfig=conda_run_config)"
|
" compute_target=compute_target,\n",
|
||||||
|
" runconfig=conda_run_config,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -261,10 +265,11 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"data_pipeline = Pipeline(\n",
|
"data_pipeline = Pipeline(\n",
|
||||||
" description=\"pipeline_with_uploaddata\",\n",
|
" description=\"pipeline_with_uploaddata\", workspace=ws, steps=[upload_data_step]\n",
|
||||||
" workspace=ws, \n",
|
")\n",
|
||||||
" steps=[upload_data_step])\n",
|
"data_pipeline_run = experiment.submit(\n",
|
||||||
"data_pipeline_run = experiment.submit(data_pipeline, pipeline_parameters={\"ds_name\":dataset})"
|
" data_pipeline, pipeline_parameters={\"ds_name\": dataset}\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -304,13 +309,14 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"data_prep_step = PythonScriptStep(script_name=\"check_data.py\", \n",
|
"data_prep_step = PythonScriptStep(\n",
|
||||||
" allow_reuse=False,\n",
|
" script_name=\"check_data.py\",\n",
|
||||||
" name=\"check_data\",\n",
|
" allow_reuse=False,\n",
|
||||||
" arguments=[\"--ds_name\", ds_name,\n",
|
" name=\"check_data\",\n",
|
||||||
" \"--model_name\", model_name],\n",
|
" arguments=[\"--ds_name\", ds_name, \"--model_name\", model_name],\n",
|
||||||
" compute_target=compute_target, \n",
|
" compute_target=compute_target,\n",
|
||||||
" runconfig=conda_run_config)"
|
" runconfig=conda_run_config,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -320,6 +326,7 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.core import Dataset\n",
|
"from azureml.core import Dataset\n",
|
||||||
|
"\n",
|
||||||
"train_ds = Dataset.get_by_name(ws, dataset)\n",
|
"train_ds = Dataset.get_by_name(ws, dataset)\n",
|
||||||
"train_ds = train_ds.drop_columns([\"partition_date\"])"
|
"train_ds = train_ds.drop_columns([\"partition_date\"])"
|
||||||
]
|
]
|
||||||
@@ -345,21 +352,22 @@
|
|||||||
" \"iteration_timeout_minutes\": 10,\n",
|
" \"iteration_timeout_minutes\": 10,\n",
|
||||||
" \"experiment_timeout_hours\": 0.25,\n",
|
" \"experiment_timeout_hours\": 0.25,\n",
|
||||||
" \"n_cross_validations\": 3,\n",
|
" \"n_cross_validations\": 3,\n",
|
||||||
" \"primary_metric\": 'r2_score',\n",
|
" \"primary_metric\": \"r2_score\",\n",
|
||||||
" \"max_concurrent_iterations\": 3,\n",
|
" \"max_concurrent_iterations\": 3,\n",
|
||||||
" \"max_cores_per_iteration\": -1,\n",
|
" \"max_cores_per_iteration\": -1,\n",
|
||||||
" \"verbosity\": logging.INFO,\n",
|
" \"verbosity\": logging.INFO,\n",
|
||||||
" \"enable_early_stopping\": True\n",
|
" \"enable_early_stopping\": True,\n",
|
||||||
"}\n",
|
"}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"automl_config = AutoMLConfig(task = 'regression',\n",
|
"automl_config = AutoMLConfig(\n",
|
||||||
" debug_log = 'automl_errors.log',\n",
|
" task=\"regression\",\n",
|
||||||
" path = \".\",\n",
|
" debug_log=\"automl_errors.log\",\n",
|
||||||
" compute_target=compute_target,\n",
|
" path=\".\",\n",
|
||||||
" training_data = train_ds,\n",
|
" compute_target=compute_target,\n",
|
||||||
" label_column_name = target_column_name,\n",
|
" training_data=train_ds,\n",
|
||||||
" **automl_settings\n",
|
" label_column_name=target_column_name,\n",
|
||||||
" )"
|
" **automl_settings,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -370,17 +378,21 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"from azureml.pipeline.core import PipelineData, TrainingOutput\n",
|
"from azureml.pipeline.core import PipelineData, TrainingOutput\n",
|
||||||
"\n",
|
"\n",
|
||||||
"metrics_output_name = 'metrics_output'\n",
|
"metrics_output_name = \"metrics_output\"\n",
|
||||||
"best_model_output_name = 'best_model_output'\n",
|
"best_model_output_name = \"best_model_output\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"metrics_data = PipelineData(name='metrics_data',\n",
|
"metrics_data = PipelineData(\n",
|
||||||
" datastore=dstor,\n",
|
" name=\"metrics_data\",\n",
|
||||||
" pipeline_output_name=metrics_output_name,\n",
|
" datastore=dstor,\n",
|
||||||
" training_output=TrainingOutput(type='Metrics'))\n",
|
" pipeline_output_name=metrics_output_name,\n",
|
||||||
"model_data = PipelineData(name='model_data',\n",
|
" training_output=TrainingOutput(type=\"Metrics\"),\n",
|
||||||
" datastore=dstor,\n",
|
")\n",
|
||||||
" pipeline_output_name=best_model_output_name,\n",
|
"model_data = PipelineData(\n",
|
||||||
" training_output=TrainingOutput(type='Model'))"
|
" name=\"model_data\",\n",
|
||||||
|
" datastore=dstor,\n",
|
||||||
|
" pipeline_output_name=best_model_output_name,\n",
|
||||||
|
" training_output=TrainingOutput(type=\"Model\"),\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -390,10 +402,11 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"automl_step = AutoMLStep(\n",
|
"automl_step = AutoMLStep(\n",
|
||||||
" name='automl_module',\n",
|
" name=\"automl_module\",\n",
|
||||||
" automl_config=automl_config,\n",
|
" automl_config=automl_config,\n",
|
||||||
" outputs=[metrics_data, model_data],\n",
|
" outputs=[metrics_data, model_data],\n",
|
||||||
" allow_reuse=False)"
|
" allow_reuse=False,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -410,13 +423,22 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"register_model_step = PythonScriptStep(script_name=\"register_model.py\",\n",
|
"register_model_step = PythonScriptStep(\n",
|
||||||
" name=\"register_model\",\n",
|
" script_name=\"register_model.py\",\n",
|
||||||
" allow_reuse=False,\n",
|
" name=\"register_model\",\n",
|
||||||
" arguments=[\"--model_name\", model_name, \"--model_path\", model_data, \"--ds_name\", ds_name],\n",
|
" allow_reuse=False,\n",
|
||||||
" inputs=[model_data],\n",
|
" arguments=[\n",
|
||||||
" compute_target=compute_target,\n",
|
" \"--model_name\",\n",
|
||||||
" runconfig=conda_run_config)"
|
" model_name,\n",
|
||||||
|
" \"--model_path\",\n",
|
||||||
|
" model_data,\n",
|
||||||
|
" \"--ds_name\",\n",
|
||||||
|
" ds_name,\n",
|
||||||
|
" ],\n",
|
||||||
|
" inputs=[model_data],\n",
|
||||||
|
" compute_target=compute_target,\n",
|
||||||
|
" runconfig=conda_run_config,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -434,8 +456,9 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"training_pipeline = Pipeline(\n",
|
"training_pipeline = Pipeline(\n",
|
||||||
" description=\"training_pipeline\",\n",
|
" description=\"training_pipeline\",\n",
|
||||||
" workspace=ws, \n",
|
" workspace=ws,\n",
|
||||||
" steps=[data_prep_step, automl_step, register_model_step])"
|
" steps=[data_prep_step, automl_step, register_model_step],\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -444,8 +467,10 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"training_pipeline_run = experiment.submit(training_pipeline, pipeline_parameters={\n",
|
"training_pipeline_run = experiment.submit(\n",
|
||||||
" \"ds_name\": dataset, \"model_name\": \"noaaweatherds\"})"
|
" training_pipeline,\n",
|
||||||
|
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -474,8 +499,8 @@
|
|||||||
"pipeline_name = \"Retraining-Pipeline-NOAAWeather\"\n",
|
"pipeline_name = \"Retraining-Pipeline-NOAAWeather\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"published_pipeline = training_pipeline.publish(\n",
|
"published_pipeline = training_pipeline.publish(\n",
|
||||||
" name=pipeline_name, \n",
|
" name=pipeline_name, description=\"Pipeline that retrains AutoML model\"\n",
|
||||||
" description=\"Pipeline that retrains AutoML model\")\n",
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"published_pipeline"
|
"published_pipeline"
|
||||||
]
|
]
|
||||||
@@ -487,13 +512,17 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.pipeline.core import Schedule\n",
|
"from azureml.pipeline.core import Schedule\n",
|
||||||
"schedule = Schedule.create(workspace=ws, name=\"RetrainingSchedule\",\n",
|
"\n",
|
||||||
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
|
"schedule = Schedule.create(\n",
|
||||||
" pipeline_id=published_pipeline.id, \n",
|
" workspace=ws,\n",
|
||||||
" experiment_name=experiment_name, \n",
|
" name=\"RetrainingSchedule\",\n",
|
||||||
" datastore=dstor,\n",
|
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
|
||||||
" wait_for_provisioning=True,\n",
|
" pipeline_id=published_pipeline.id,\n",
|
||||||
" polling_interval=1440)"
|
" experiment_name=experiment_name,\n",
|
||||||
|
" datastore=dstor,\n",
|
||||||
|
" wait_for_provisioning=True,\n",
|
||||||
|
" polling_interval=1440,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -517,8 +546,8 @@
|
|||||||
"pipeline_name = \"DataIngestion-Pipeline-NOAAWeather\"\n",
|
"pipeline_name = \"DataIngestion-Pipeline-NOAAWeather\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"published_pipeline = training_pipeline.publish(\n",
|
"published_pipeline = training_pipeline.publish(\n",
|
||||||
" name=pipeline_name, \n",
|
" name=pipeline_name, description=\"Pipeline that updates NOAAWeather Dataset\"\n",
|
||||||
" description=\"Pipeline that updates NOAAWeather Dataset\")\n",
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"published_pipeline"
|
"published_pipeline"
|
||||||
]
|
]
|
||||||
@@ -530,13 +559,17 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.pipeline.core import Schedule\n",
|
"from azureml.pipeline.core import Schedule\n",
|
||||||
"schedule = Schedule.create(workspace=ws, name=\"RetrainingSchedule-DataIngestion\",\n",
|
"\n",
|
||||||
" pipeline_parameters={\"ds_name\":dataset},\n",
|
"schedule = Schedule.create(\n",
|
||||||
" pipeline_id=published_pipeline.id, \n",
|
" workspace=ws,\n",
|
||||||
" experiment_name=experiment_name, \n",
|
" name=\"RetrainingSchedule-DataIngestion\",\n",
|
||||||
" datastore=dstor,\n",
|
" pipeline_parameters={\"ds_name\": dataset},\n",
|
||||||
" wait_for_provisioning=True,\n",
|
" pipeline_id=published_pipeline.id,\n",
|
||||||
" polling_interval=1440)"
|
" experiment_name=experiment_name,\n",
|
||||||
|
" datastore=dstor,\n",
|
||||||
|
" wait_for_provisioning=True,\n",
|
||||||
|
" polling_interval=1440,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
@@ -547,9 +580,9 @@
|
|||||||
}
|
}
|
||||||
],
|
],
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3.6",
|
"display_name": "Python 3.8 - AzureML",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python36"
|
"name": "python38-azureml"
|
||||||
},
|
},
|
||||||
"language_info": {
|
"language_info": {
|
||||||
"codemirror_mode": {
|
"codemirror_mode": {
|
||||||
|
|||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: auto-ml-continuous-retraining
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -31,12 +31,15 @@ try:
|
|||||||
model = Model(ws, args.model_name)
|
model = Model(ws, args.model_name)
|
||||||
last_train_time = model.created_time
|
last_train_time = model.created_time
|
||||||
print("Model was last trained on {0}.".format(last_train_time))
|
print("Model was last trained on {0}.".format(last_train_time))
|
||||||
except Exception as e:
|
except Exception:
|
||||||
print("Could not get last model train time.")
|
print("Could not get last model train time.")
|
||||||
last_train_time = datetime.min.replace(tzinfo=pytz.UTC)
|
last_train_time = datetime.min.replace(tzinfo=pytz.UTC)
|
||||||
|
|
||||||
train_ds = Dataset.get_by_name(ws, args.ds_name)
|
train_ds = Dataset.get_by_name(ws, args.ds_name)
|
||||||
dataset_changed_time = train_ds.data_changed_time
|
dataset_changed_time = train_ds.data_changed_time.replace(tzinfo=pytz.UTC)
|
||||||
|
|
||||||
|
print("dataset_changed_time=" + str(dataset_changed_time))
|
||||||
|
print("last_train_time=" + str(last_train_time))
|
||||||
|
|
||||||
if not dataset_changed_time > last_train_time:
|
if not dataset_changed_time > last_train_time:
|
||||||
print("Cancelling run since there is no new data.")
|
print("Cancelling run since there is no new data.")
|
||||||
|
|||||||
@@ -25,9 +25,11 @@ datasets = [(Dataset.Scenario.TRAINING, train_ds)]
|
|||||||
|
|
||||||
# Register model with training dataset
|
# Register model with training dataset
|
||||||
|
|
||||||
model = Model.register(workspace=ws,
|
model = Model.register(
|
||||||
model_path=args.model_path,
|
workspace=ws,
|
||||||
model_name=args.model_name,
|
model_path=args.model_path,
|
||||||
datasets=datasets)
|
model_name=args.model_name,
|
||||||
|
datasets=datasets,
|
||||||
|
)
|
||||||
|
|
||||||
print("Registered version {0} of model {1}".format(model.version, model.name))
|
print("Registered version {0} of model {1}".format(model.version, model.name))
|
||||||
|
|||||||
@@ -16,26 +16,82 @@ if type(run) == _OfflineRun:
|
|||||||
else:
|
else:
|
||||||
ws = run.experiment.workspace
|
ws = run.experiment.workspace
|
||||||
|
|
||||||
usaf_list = ['725724', '722149', '723090', '722159', '723910', '720279',
|
usaf_list = [
|
||||||
'725513', '725254', '726430', '720381', '723074', '726682',
|
"725724",
|
||||||
'725486', '727883', '723177', '722075', '723086', '724053',
|
"722149",
|
||||||
'725070', '722073', '726060', '725224', '725260', '724520',
|
"723090",
|
||||||
'720305', '724020', '726510', '725126', '722523', '703333',
|
"722159",
|
||||||
'722249', '722728', '725483', '722972', '724975', '742079',
|
"723910",
|
||||||
'727468', '722193', '725624', '722030', '726380', '720309',
|
"720279",
|
||||||
'722071', '720326', '725415', '724504', '725665', '725424',
|
"725513",
|
||||||
'725066']
|
"725254",
|
||||||
|
"726430",
|
||||||
|
"720381",
|
||||||
|
"723074",
|
||||||
|
"726682",
|
||||||
|
"725486",
|
||||||
|
"727883",
|
||||||
|
"723177",
|
||||||
|
"722075",
|
||||||
|
"723086",
|
||||||
|
"724053",
|
||||||
|
"725070",
|
||||||
|
"722073",
|
||||||
|
"726060",
|
||||||
|
"725224",
|
||||||
|
"725260",
|
||||||
|
"724520",
|
||||||
|
"720305",
|
||||||
|
"724020",
|
||||||
|
"726510",
|
||||||
|
"725126",
|
||||||
|
"722523",
|
||||||
|
"703333",
|
||||||
|
"722249",
|
||||||
|
"722728",
|
||||||
|
"725483",
|
||||||
|
"722972",
|
||||||
|
"724975",
|
||||||
|
"742079",
|
||||||
|
"727468",
|
||||||
|
"722193",
|
||||||
|
"725624",
|
||||||
|
"722030",
|
||||||
|
"726380",
|
||||||
|
"720309",
|
||||||
|
"722071",
|
||||||
|
"720326",
|
||||||
|
"725415",
|
||||||
|
"724504",
|
||||||
|
"725665",
|
||||||
|
"725424",
|
||||||
|
"725066",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
def get_noaa_data(start_time, end_time):
|
def get_noaa_data(start_time, end_time):
|
||||||
columns = ['usaf', 'wban', 'datetime', 'latitude', 'longitude', 'elevation',
|
columns = [
|
||||||
'windAngle', 'windSpeed', 'temperature', 'stationName', 'p_k']
|
"usaf",
|
||||||
|
"wban",
|
||||||
|
"datetime",
|
||||||
|
"latitude",
|
||||||
|
"longitude",
|
||||||
|
"elevation",
|
||||||
|
"windAngle",
|
||||||
|
"windSpeed",
|
||||||
|
"temperature",
|
||||||
|
"stationName",
|
||||||
|
"p_k",
|
||||||
|
]
|
||||||
isd = NoaaIsdWeather(start_time, end_time, cols=columns)
|
isd = NoaaIsdWeather(start_time, end_time, cols=columns)
|
||||||
noaa_df = isd.to_pandas_dataframe()
|
noaa_df = isd.to_pandas_dataframe()
|
||||||
df_filtered = noaa_df[noaa_df["usaf"].isin(usaf_list)]
|
df_filtered = noaa_df[noaa_df["usaf"].isin(usaf_list)]
|
||||||
df_filtered.reset_index(drop=True)
|
df_filtered.reset_index(drop=True)
|
||||||
print("Received {0} rows of training data between {1} and {2}".format(
|
print(
|
||||||
df_filtered.shape[0], start_time, end_time))
|
"Received {0} rows of training data between {1} and {2}".format(
|
||||||
|
df_filtered.shape[0], start_time, end_time
|
||||||
|
)
|
||||||
|
)
|
||||||
return df_filtered
|
return df_filtered
|
||||||
|
|
||||||
|
|
||||||
@@ -49,41 +105,57 @@ print("Argument 1(ds_name): %s" % args.ds_name)
|
|||||||
|
|
||||||
dstor = ws.get_default_datastore()
|
dstor = ws.get_default_datastore()
|
||||||
register_dataset = False
|
register_dataset = False
|
||||||
|
end_time = datetime.utcnow()
|
||||||
|
|
||||||
try:
|
try:
|
||||||
ds = Dataset.get_by_name(ws, args.ds_name)
|
ds = Dataset.get_by_name(ws, args.ds_name)
|
||||||
end_time_last_slice = ds.data_changed_time.replace(tzinfo=None)
|
end_time_last_slice = ds.data_changed_time.replace(tzinfo=None)
|
||||||
print("Dataset {0} last updated on {1}".format(args.ds_name,
|
print("Dataset {0} last updated on {1}".format(args.ds_name, end_time_last_slice))
|
||||||
end_time_last_slice))
|
except Exception:
|
||||||
except Exception as e:
|
|
||||||
print(traceback.format_exc())
|
print(traceback.format_exc())
|
||||||
print("Dataset with name {0} not found, registering new dataset.".format(args.ds_name))
|
print(
|
||||||
|
"Dataset with name {0} not found, registering new dataset.".format(args.ds_name)
|
||||||
|
)
|
||||||
register_dataset = True
|
register_dataset = True
|
||||||
end_time_last_slice = datetime.today() - relativedelta(weeks=2)
|
end_time = datetime(2021, 5, 1, 0, 0)
|
||||||
|
end_time_last_slice = end_time - relativedelta(weeks=2)
|
||||||
|
|
||||||
end_time = datetime.utcnow()
|
try:
|
||||||
train_df = get_noaa_data(end_time_last_slice, end_time)
|
train_df = get_noaa_data(end_time_last_slice, end_time)
|
||||||
|
except Exception as ex:
|
||||||
|
print("get_noaa_data failed:", ex)
|
||||||
|
train_df = None
|
||||||
|
|
||||||
if train_df.size > 0:
|
if train_df is not None and train_df.size > 0:
|
||||||
print("Received {0} rows of new data after {0}.".format(
|
print(
|
||||||
train_df.shape[0], end_time_last_slice))
|
"Received {0} rows of new data after {1}.".format(
|
||||||
folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(args.ds_name, end_time.year,
|
train_df.shape[0], end_time_last_slice
|
||||||
end_time.month, end_time.day,
|
)
|
||||||
end_time.hour, end_time.minute,
|
)
|
||||||
end_time.second)
|
folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(
|
||||||
|
args.ds_name,
|
||||||
|
end_time.year,
|
||||||
|
end_time.month,
|
||||||
|
end_time.day,
|
||||||
|
end_time.hour,
|
||||||
|
end_time.minute,
|
||||||
|
end_time.second,
|
||||||
|
)
|
||||||
file_path = "{0}/data.csv".format(folder_name)
|
file_path = "{0}/data.csv".format(folder_name)
|
||||||
|
|
||||||
# Add a new partition to the registered dataset
|
# Add a new partition to the registered dataset
|
||||||
os.makedirs(folder_name, exist_ok=True)
|
os.makedirs(folder_name, exist_ok=True)
|
||||||
train_df.to_csv(file_path, index=False)
|
train_df.to_csv(file_path, index=False)
|
||||||
|
|
||||||
dstor.upload_files(files=[file_path],
|
dstor.upload_files(
|
||||||
target_path=folder_name,
|
files=[file_path], target_path=folder_name, overwrite=True, show_progress=True
|
||||||
overwrite=True,
|
)
|
||||||
show_progress=True)
|
|
||||||
else:
|
else:
|
||||||
print("No new data since {0}.".format(end_time_last_slice))
|
print("No new data since {0}.".format(end_time_last_slice))
|
||||||
|
|
||||||
if register_dataset:
|
if register_dataset:
|
||||||
ds = Dataset.Tabular.from_delimited_files(dstor.path("{}/**/*.csv".format(
|
ds = Dataset.Tabular.from_delimited_files(
|
||||||
args.ds_name)), partition_format='/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv')
|
dstor.path("{}/**/*.csv".format(args.ds_name)),
|
||||||
|
partition_format="/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv",
|
||||||
|
)
|
||||||
ds.register(ws, name=args.ds_name)
|
ds.register(ws, name=args.ds_name)
|
||||||
|
|||||||
@@ -0,0 +1,346 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Automated Machine Learning - Codegen for AutoFeaturization \n",
|
||||||
|
"_**Autofeaturization of credit card fraudulent transactions dataset on remote compute and codegen functionality**_\n",
|
||||||
|
"\n",
|
||||||
|
"## Contents\n",
|
||||||
|
"1. [Introduction](#Introduction)\n",
|
||||||
|
"1. [Setup](#Setup)\n",
|
||||||
|
"1. [Data](#Data)\n",
|
||||||
|
"1. [Autofeaturization](#Autofeaturization)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Introduction'></a>\n",
|
||||||
|
"## Introduction"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"**Autofeaturization** lets you run an AutoML experiment to only featurize the datasets. These datasets along with the transformer are stored in AML Storage and linked to the run which can later be retrieved and used to train models. \n",
|
||||||
|
"\n",
|
||||||
|
"**To run Autofeaturization, set the number of iterations to zero and featurization as auto.**\n",
|
||||||
|
"\n",
|
||||||
|
"Please refer to [Autofeaturization and custom model training](../autofeaturization-custom-model-training/custom-model-training-from-autofeaturization-run.ipynb) for more details on the same.\n",
|
||||||
|
"\n",
|
||||||
|
"[Codegen](https://github.com/Azure/automl-codegen-preview) is a feature, which when enabled, provides a user with the script of the underlying functionality and a notebook to tweak inputs or code and rerun the same.\n",
|
||||||
|
"\n",
|
||||||
|
"In this example we use the credit card fraudulent transactions dataset to showcase how you can use AutoML for autofeaturization and further how you can enable the `Codegen` feature.\n",
|
||||||
|
"\n",
|
||||||
|
"This notebook is using remote compute to complete the featurization.\n",
|
||||||
|
"\n",
|
||||||
|
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../configuration.ipynb) notebook first if you haven't already, to establish your connection to the AzureML Workspace. \n",
|
||||||
|
"\n",
|
||||||
|
"Here you will learn how to create an autofeaturization experiment using an existing workspace with codegen feature enabled."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Setup'></a>\n",
|
||||||
|
"## Setup\n",
|
||||||
|
"\n",
|
||||||
|
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import logging\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core.experiment import Experiment\n",
|
||||||
|
"from azureml.core.workspace import Workspace\n",
|
||||||
|
"from azureml.core.dataset import Dataset\n",
|
||||||
|
"from azureml.train.automl import AutoMLConfig"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(\"This notebook was created using version 1.55.0 of the Azure ML SDK\")\n",
|
||||||
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"\n",
|
||||||
|
"# choose a name for experiment\n",
|
||||||
|
"experiment_name = 'automl-autofeaturization-ccard-codegen-remote'\n",
|
||||||
|
"\n",
|
||||||
|
"experiment=Experiment(ws, experiment_name)\n",
|
||||||
|
"\n",
|
||||||
|
"output = {}\n",
|
||||||
|
"output['Subscription ID'] = ws.subscription_id\n",
|
||||||
|
"output['Workspace'] = ws.name\n",
|
||||||
|
"output['Resource Group'] = ws.resource_group\n",
|
||||||
|
"output['Location'] = ws.location\n",
|
||||||
|
"output['Experiment Name'] = experiment.name\n",
|
||||||
|
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||||
|
"outputDf.T"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Create or Attach existing AmlCompute\n",
|
||||||
|
"A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||||
|
"\n",
|
||||||
|
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||||
|
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||||
|
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
|
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||||
|
"\n",
|
||||||
|
"# Choose a name for your CPU cluster\n",
|
||||||
|
"cpu_cluster_name = \"cpu-cluster\"\n",
|
||||||
|
"\n",
|
||||||
|
"# Verify that cluster does not exist already\n",
|
||||||
|
"try:\n",
|
||||||
|
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||||
|
" print('Found existing cluster, use it.')\n",
|
||||||
|
"except ComputeTargetException:\n",
|
||||||
|
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
||||||
|
" max_nodes=6)\n",
|
||||||
|
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||||
|
"\n",
|
||||||
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Data'></a>\n",
|
||||||
|
"## Data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Load Data\n",
|
||||||
|
"\n",
|
||||||
|
"Load the credit card fraudulent transactions dataset from a CSV file, containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. \n",
|
||||||
|
"\n",
|
||||||
|
"Here the autofeaturization run will featurize the training data passed in."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"##### Training Dataset"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"training_data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard_train.csv\"\n",
|
||||||
|
"training_dataset = Dataset.Tabular.from_delimited_files(training_data) # Tabular dataset\n",
|
||||||
|
"\n",
|
||||||
|
"label_column_name = 'Class' # output label"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Autofeaturization'></a>\n",
|
||||||
|
"## AutoFeaturization\n",
|
||||||
|
"\n",
|
||||||
|
"Instantiate an AutoMLConfig object. This defines the settings and data used to run the autofeaturization experiment.\n",
|
||||||
|
"\n",
|
||||||
|
"|Property|Description|\n",
|
||||||
|
"|-|-|\n",
|
||||||
|
"|**task**|classification or regression or forecasting|\n",
|
||||||
|
"|**training_data**|Input training dataset, containing both features and label column.|\n",
|
||||||
|
"|**iterations**|For an autofeaturization run, iterations will be 0.|\n",
|
||||||
|
"|**featurization**|For an autofeaturization run, featurization can be 'auto' or 'custom'.|\n",
|
||||||
|
"|**label_column_name**|The name of the label column.|\n",
|
||||||
|
"|**enable_code_generation**|For enabling codegen for the run, value would be True|\n",
|
||||||
|
"\n",
|
||||||
|
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||||
|
" debug_log = 'automl_errors.log',\n",
|
||||||
|
" iterations = 0, # autofeaturization run can be triggered by setting iterations to 0\n",
|
||||||
|
" compute_target = compute_target,\n",
|
||||||
|
" training_data = training_dataset,\n",
|
||||||
|
" label_column_name = label_column_name,\n",
|
||||||
|
" featurization = 'auto',\n",
|
||||||
|
" verbosity = logging.INFO,\n",
|
||||||
|
" enable_code_generation = True # enable codegen\n",
|
||||||
|
" )"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"remote_run = experiment.submit(automl_config, show_output = False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Results"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Widget for Monitoring Runs\n",
|
||||||
|
"\n",
|
||||||
|
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
|
||||||
|
"\n",
|
||||||
|
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.widgets import RunDetails\n",
|
||||||
|
"RunDetails(remote_run).show()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"remote_run.wait_for_completion(show_output=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Codegen Script and Notebook"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Codegen script and notebook can be found under the `Outputs + logs` section from the details page of the remote run. Please check for the `autofeaturization_notebook.ipynb` under `/outputs/generated_code`. To modify the featurization code, open `script.py` and make changes. The codegen notebook can be run with the same environment configuration as the above AutoML run."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Experiment Complete!"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "bhavanatumma"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"interpreter": {
|
||||||
|
"hash": "adb464b67752e4577e3dc163235ced27038d19b7d88def00d75d1975bde5d9ab"
|
||||||
|
},
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.8 - AzureML",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python38-azureml"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.6.9"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
|
}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: codegen-for-autofeaturization
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -0,0 +1,729 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Automated Machine Learning - AutoFeaturization (Part 1)\n",
|
||||||
|
"_**Autofeaturization of credit card fraudulent transactions dataset on remote compute**_\n",
|
||||||
|
"\n",
|
||||||
|
"## Contents\n",
|
||||||
|
"1. [Introduction](#Introduction)\n",
|
||||||
|
"1. [Setup](#Setup)\n",
|
||||||
|
"1. [Data](#Data)\n",
|
||||||
|
"1. [Autofeaturization](#Autofeaturization)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Introduction'></a>\n",
|
||||||
|
"## Introduction"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Autofeaturization is a new feature to let you as the user run an AutoML experiment to only featurize the datasets. These datasets along with the transformer will be stored in the experiment which can later be retrieved and used to train models, either via AutoML or custom training. \n",
|
||||||
|
"\n",
|
||||||
|
"**To run Autofeaturization, pass in zero iterations and featurization as auto. This will featurize the datasets and terminate the experiment. Training will not occur.**\n",
|
||||||
|
"\n",
|
||||||
|
"*Limitations - Sparse data cannot be supported at the moment. Any dataset that has extensive categorical data might be featurized into sparse data which will not be allowed as input to AutoML. Efforts are underway to support sparse data and will be updated soon.* \n",
|
||||||
|
"\n",
|
||||||
|
"In this example we use the credit card fraudulent transactions dataset to showcase how you can use AutoML for autofeaturization. The goal is to clean and featurize the training dataset.\n",
|
||||||
|
"\n",
|
||||||
|
"This notebook is using remote compute to complete the featurization.\n",
|
||||||
|
"\n",
|
||||||
|
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../configuration.ipynb) notebook first if you haven't already, to establish your connection to the AzureML Workspace. \n",
|
||||||
|
"\n",
|
||||||
|
"In the below steps, you will learn how to:\n",
|
||||||
|
"1. Create an autofeaturization experiment using an existing workspace.\n",
|
||||||
|
"2. View the featurized datasets and transformer"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Setup'></a>\n",
|
||||||
|
"## Setup\n",
|
||||||
|
"\n",
|
||||||
|
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import logging\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core.experiment import Experiment\n",
|
||||||
|
"from azureml.core.workspace import Workspace\n",
|
||||||
|
"from azureml.core.dataset import Dataset\n",
|
||||||
|
"from azureml.train.automl import AutoMLConfig"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(\"This notebook was created using version 1.55.0 of the Azure ML SDK\")\n",
|
||||||
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"\n",
|
||||||
|
"# choose a name for experiment\n",
|
||||||
|
"experiment_name = 'automl-autofeaturization-ccard-remote'\n",
|
||||||
|
"\n",
|
||||||
|
"experiment=Experiment(ws, experiment_name)\n",
|
||||||
|
"\n",
|
||||||
|
"output = {}\n",
|
||||||
|
"output['Subscription ID'] = ws.subscription_id\n",
|
||||||
|
"output['Workspace'] = ws.name\n",
|
||||||
|
"output['Resource Group'] = ws.resource_group\n",
|
||||||
|
"output['Location'] = ws.location\n",
|
||||||
|
"output['Experiment Name'] = experiment.name\n",
|
||||||
|
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||||
|
"outputDf.T"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Create or Attach existing AmlCompute\n",
|
||||||
|
"A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||||
|
"\n",
|
||||||
|
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||||
|
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||||
|
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
|
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||||
|
"\n",
|
||||||
|
"# Choose a name for your CPU cluster\n",
|
||||||
|
"cpu_cluster_name = \"cpu-cluster\"\n",
|
||||||
|
"\n",
|
||||||
|
"# Verify that cluster does not exist already\n",
|
||||||
|
"try:\n",
|
||||||
|
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||||
|
" print('Found existing cluster, use it.')\n",
|
||||||
|
"except ComputeTargetException:\n",
|
||||||
|
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
||||||
|
" max_nodes=6)\n",
|
||||||
|
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||||
|
"\n",
|
||||||
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Data'></a>\n",
|
||||||
|
"## Data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Load Data\n",
|
||||||
|
"\n",
|
||||||
|
"Load the credit card fraudulent transactions dataset from a CSV file, containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. \n",
|
||||||
|
"\n",
|
||||||
|
"Here the autofeaturization run will featurize the training data passed in."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"##### Training Dataset"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"training_data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard_train.csv\"\n",
|
||||||
|
"training_dataset = Dataset.Tabular.from_delimited_files(training_data) # Tabular dataset\n",
|
||||||
|
"\n",
|
||||||
|
"label_column_name = 'Class' # output label"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Autofeaturization'></a>\n",
|
||||||
|
"## AutoFeaturization\n",
|
||||||
|
"\n",
|
||||||
|
"Instantiate an AutoMLConfig object. This defines the settings and data used to run the autofeaturization experiment.\n",
|
||||||
|
"\n",
|
||||||
|
"|Property|Description|\n",
|
||||||
|
"|-|-|\n",
|
||||||
|
"|**task**|classification or regression|\n",
|
||||||
|
"|**training_data**|Input training dataset, containing both features and label column.|\n",
|
||||||
|
"|**iterations**|For an autofeaturization run, iterations will be 0.|\n",
|
||||||
|
"|**featurization**|For an autofeaturization run, featurization will be 'auto'.|\n",
|
||||||
|
"|**label_column_name**|The name of the label column.|\n",
|
||||||
|
"\n",
|
||||||
|
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||||
|
" debug_log = 'automl_errors.log',\n",
|
||||||
|
" iterations = 0, # autofeaturization run can be triggered by setting iterations to 0\n",
|
||||||
|
" compute_target = compute_target,\n",
|
||||||
|
" training_data = training_dataset,\n",
|
||||||
|
" label_column_name = label_column_name,\n",
|
||||||
|
" featurization = 'auto',\n",
|
||||||
|
" verbosity = logging.INFO\n",
|
||||||
|
" )"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"remote_run = experiment.submit(automl_config, show_output = False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Transformer and Featurized Datasets\n",
|
||||||
|
"The given datasets have been featurized and stored under `Outputs + logs` from the details page of the remote run. The structure is shown below. The featurized dataset is stored under `/outputs/featurization/data` and the transformer is saved under `/outputs/featurization/pipeline` \n",
|
||||||
|
"\n",
|
||||||
|
"Below you will learn how to refer to the data saved in your run and retrieve the same."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Results"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Widget for Monitoring Runs\n",
|
||||||
|
"\n",
|
||||||
|
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
|
||||||
|
"\n",
|
||||||
|
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.widgets import RunDetails\n",
|
||||||
|
"RunDetails(remote_run).show()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"remote_run.wait_for_completion(show_output=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Automated Machine Learning - AutoFeaturization (Part 2)\n",
|
||||||
|
"_**Training using a custom model with the featurized data from Autofeaturization run of credit card fraudulent transactions dataset**_\n",
|
||||||
|
"\n",
|
||||||
|
"## Contents\n",
|
||||||
|
"1. [Introduction](#Introduction)\n",
|
||||||
|
"1. [Data Setup](#DataSetup)\n",
|
||||||
|
"1. [Autofeaturization Data](#AutofeaturizationData)\n",
|
||||||
|
"1. [Train](#Train)\n",
|
||||||
|
"1. [Results](#Results)\n",
|
||||||
|
"1. [Test](#Test)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Introduction'></a>\n",
|
||||||
|
"## Introduction\n",
|
||||||
|
"\n",
|
||||||
|
"Here we use the featurized dataset saved in the above run to showcase how you can perform custom training by using the transformer from an autofeaturization run to transform validation / test datasets. \n",
|
||||||
|
"\n",
|
||||||
|
"The goal is to use autofeaturized run data and transformer to transform and run a custom training experiment independently\n",
|
||||||
|
"\n",
|
||||||
|
"In the below steps, you will learn how to:\n",
|
||||||
|
"1. Read transformer from a completed autofeaturization run and transform data\n",
|
||||||
|
"2. Pull featurized data from a completed autofeaturization run\n",
|
||||||
|
"3. Run a custom training experiment with the above data\n",
|
||||||
|
"4. Check results"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='DataSetup'></a>\n",
|
||||||
|
"## Data Setup"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"We will load the featurized training data and also load the transformer from the above autofeaturized run. This transformer can then be used to transform the test data to check the accuracy of the custom model after training."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Load Test Data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"load test dataset from CSV and split into X and y columns to featurize with the transformer going forward."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"test_data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard_test.csv\"\n",
|
||||||
|
"\n",
|
||||||
|
"test_dataset = pd.read_csv(test_data)\n",
|
||||||
|
"label_column_name = 'Class'\n",
|
||||||
|
"\n",
|
||||||
|
"X_test_data = test_dataset[test_dataset.columns.difference([label_column_name])]\n",
|
||||||
|
"y_test_data = test_dataset[label_column_name].values\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Load data_transformer from the above remote run artifact"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### (Method 1)\n",
|
||||||
|
"\n",
|
||||||
|
"Method 1 allows you to read the transformer from the remote storage."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import mlflow\n",
|
||||||
|
"mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())\n",
|
||||||
|
"\n",
|
||||||
|
"# Set uri to fetch data transformer from remote parent run.\n",
|
||||||
|
"artifact_path = \"/outputs/featurization/pipeline/\"\n",
|
||||||
|
"uri = \"runs:/\" + remote_run.id + artifact_path\n",
|
||||||
|
"\n",
|
||||||
|
"print(uri)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### (Method 2)\n",
|
||||||
|
"\n",
|
||||||
|
"Method 2 downloads the transformer to the local directory and then can be used to transform the data. Uncomment to use."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"''' import pathlib\n",
|
||||||
|
"\n",
|
||||||
|
"# Download the transformer to the local directory\n",
|
||||||
|
"transformers_file_path = \"/outputs/featurization/pipeline/\"\n",
|
||||||
|
"local_path = \"./transformer\"\n",
|
||||||
|
"remote_run.download_files(prefix=transformers_file_path, output_directory=local_path, batch_size=500)\n",
|
||||||
|
"\n",
|
||||||
|
"path = pathlib.Path(\"transformer\") \n",
|
||||||
|
"path = str(path.absolute()) + transformers_file_path\n",
|
||||||
|
"str_uri = \"file:///\" + path\n",
|
||||||
|
"\n",
|
||||||
|
"print(str_uri) '''"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Transform Data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"attachments": {},
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"**Note:** Not all datasets produce a y_transformer. The dataset used in the current notebook requires a transformer as the y column data is categorical. \n",
|
||||||
|
"\n",
|
||||||
|
"We will go ahead and download the mlflow transformer model and use it to transform test data that can be used for further experimentation below. To run the commented code, make sure the environment requirement is satisfied. You can go ahead and create the environment from the `conda.yaml` file under `/outputs/featurization/pipeline/` and run the given code in it."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"''' from azureml.automl.core.shared.constants import Transformers\n",
|
||||||
|
"\n",
|
||||||
|
"transformers = mlflow.sklearn.load_model(uri) # Using method 1\n",
|
||||||
|
"data_transformers = transformers.get_transformers()\n",
|
||||||
|
"x_transformer = data_transformers[Transformers.X_TRANSFORMER]\n",
|
||||||
|
"y_transformer = data_transformers[Transformers.Y_TRANSFORMER]\n",
|
||||||
|
"\n",
|
||||||
|
"X_test = x_transformer.transform(X_test_data)\n",
|
||||||
|
"y_test = y_transformer.transform(y_test_data) '''"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"attachments": {},
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Run the following cell to see the featurization summary of X and y transformers. Uncomment to use. "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"''' X_data_summary = x_transformer.get_featurization_summary(is_user_friendly=False)\n",
|
||||||
|
"\n",
|
||||||
|
"summary_df = pd.DataFrame.from_records(X_data_summary)\n",
|
||||||
|
"summary_df '''"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Load Datastore\n",
|
||||||
|
"\n",
|
||||||
|
"The below data store holds the featurized datasets, hence we load and access the data. Check the path and file names according to the saved structure in your experiment `Outputs + logs` as seen in <i>Autofeaturization Part 1</i>"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.datastore import Datastore\n",
|
||||||
|
"\n",
|
||||||
|
"ds = Datastore.get(ws, \"workspaceartifactstore\")\n",
|
||||||
|
"experiment_loc = \"ExperimentRun/dcid.\" + remote_run.id\n",
|
||||||
|
"\n",
|
||||||
|
"remote_data_path = \"/outputs/featurization/data/\""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='AutofeaturizationData'></a>\n",
|
||||||
|
"## Autofeaturization Data\n",
|
||||||
|
"\n",
|
||||||
|
"We will load the training data from the previously completed Autofeaturization experiment. The resulting featurized dataframe can be passed into the custom model for training. Here we are saving the file to local from the experiment storage and reading the data."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"train_data_file_path = \"full_training_dataset.df.parquet\"\n",
|
||||||
|
"local_data_path = \"./data/\" + train_data_file_path\n",
|
||||||
|
"\n",
|
||||||
|
"remote_run.download_file(remote_data_path + train_data_file_path, local_data_path)\n",
|
||||||
|
"\n",
|
||||||
|
"full_training_data = pd.read_parquet(local_data_path)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"attachments": {},
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Another way to load the data is to go to the above autofeaturization experiment and check for the featurized dataset ids under `Output datasets`. Uncomment and replace them accordingly below, to use."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# train_data = Dataset.get_by_id(ws, 'cb4418ee-bac4-45ac-b055-600653bdf83a') # replace the featurized full_training_dataset id\n",
|
||||||
|
"# full_training_data = train_data.to_pandas_dataframe()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Training Data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"We are dropping the y column and weights column from the featurized training dataset."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"Y_COLUMN = \"automl_y\"\n",
|
||||||
|
"SW_COLUMN = \"automl_weights\"\n",
|
||||||
|
"\n",
|
||||||
|
"X_train = full_training_data[full_training_data.columns.difference([Y_COLUMN, SW_COLUMN])]\n",
|
||||||
|
"y_train = full_training_data[Y_COLUMN].values\n",
|
||||||
|
"sample_weight = full_training_data[SW_COLUMN].values"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Train'></a>\n",
|
||||||
|
"## Train"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"attachments": {},
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Here we are passing our training data to the lightgbm classifier, any custom model can be used with your data. Let us first install lightgbm."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"! pip install lightgbm"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import lightgbm as lgb\n",
|
||||||
|
"\n",
|
||||||
|
"model = lgb.LGBMClassifier(learning_rate=0.08,max_depth=-5,random_state=42)\n",
|
||||||
|
"model.fit(X_train, y_train, sample_weight=sample_weight)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"attachments": {},
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Once training is done, the test data obtained after transforming from the above downloaded transformer can be used to calculate the accuracy "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print('Training accuracy {:.4f}'.format(model.score(X_train, y_train)))\n",
|
||||||
|
"\n",
|
||||||
|
"# Uncomment below to test the model on test data \n",
|
||||||
|
"# print('Testing accuracy {:.4f}'.format(model.score(X_test, y_test)))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Results'></a>\n",
|
||||||
|
"## Analyze results\n",
|
||||||
|
"\n",
|
||||||
|
"### Retrieve the Model"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"model"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Test'></a>\n",
|
||||||
|
"## Test the fitted model\n",
|
||||||
|
"\n",
|
||||||
|
"Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Uncomment below to test the model on test data\n",
|
||||||
|
"# y_pred = model.predict(X_test)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Experiment Complete!"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "bhavanatumma"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"interpreter": {
|
||||||
|
"hash": "adb464b67752e4577e3dc163235ced27038d19b7d88def00d75d1975bde5d9ab"
|
||||||
|
},
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.8 - AzureML",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python38-azureml"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.6.9"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
|
}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: custom-model-training-from-autofeaturization-run
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -5,7 +5,7 @@ set options=%3
|
|||||||
set PIP_NO_WARN_SCRIPT_LOCATION=0
|
set PIP_NO_WARN_SCRIPT_LOCATION=0
|
||||||
|
|
||||||
IF "%conda_env_name%"=="" SET conda_env_name="azure_automl_experimental"
|
IF "%conda_env_name%"=="" SET conda_env_name="azure_automl_experimental"
|
||||||
IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml"
|
IF "%automl_env_file%"=="" SET automl_env_file="automl_thin_client_env.yml"
|
||||||
|
|
||||||
IF NOT EXIST %automl_env_file% GOTO YmlMissing
|
IF NOT EXIST %automl_env_file% GOTO YmlMissing
|
||||||
|
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ fi
|
|||||||
|
|
||||||
if [ "$AUTOML_ENV_FILE" == "" ]
|
if [ "$AUTOML_ENV_FILE" == "" ]
|
||||||
then
|
then
|
||||||
AUTOML_ENV_FILE="automl_env.yml"
|
AUTOML_ENV_FILE="automl_thin_client_env.yml"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ! -f $AUTOML_ENV_FILE ]; then
|
if [ ! -f $AUTOML_ENV_FILE ]; then
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ fi
|
|||||||
|
|
||||||
if [ "$AUTOML_ENV_FILE" == "" ]
|
if [ "$AUTOML_ENV_FILE" == "" ]
|
||||||
then
|
then
|
||||||
AUTOML_ENV_FILE="automl_env.yml"
|
AUTOML_ENV_FILE="automl_thin_client_env_mac.yml"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ! -f $AUTOML_ENV_FILE ]; then
|
if [ ! -f $AUTOML_ENV_FILE ]; then
|
||||||
|
|||||||
@@ -1,17 +1,15 @@
|
|||||||
name: azure_automl_experimental
|
name: azure_automl_experimental
|
||||||
dependencies:
|
dependencies:
|
||||||
# The python interpreter version.
|
# The python interpreter version.
|
||||||
# Currently Azure ML only supports 3.5.2 and later.
|
# Currently Azure ML only supports 3.7.0 and later.
|
||||||
- pip<=19.3.1
|
- pip<=22.3.1
|
||||||
- python>=3.5.2,<3.8
|
- python>=3.7.0,<3.11
|
||||||
- nb_conda
|
|
||||||
- cython
|
|
||||||
- urllib3<1.24
|
|
||||||
|
|
||||||
- pip:
|
- pip:
|
||||||
# Required packages for AzureML execution, history, and data preparation.
|
# Required packages for AzureML execution, history, and data preparation.
|
||||||
- azureml-defaults
|
- azureml-defaults
|
||||||
- azureml-sdk
|
- azureml-sdk
|
||||||
- azureml-widgets
|
- azureml-widgets
|
||||||
|
- azureml-mlflow
|
||||||
- pandas
|
- pandas
|
||||||
- PyJWT < 2.0.0
|
- mlflow
|
||||||
|
|||||||
@@ -1,18 +1,24 @@
|
|||||||
name: azure_automl_experimental
|
name: azure_automl_experimental
|
||||||
|
channels:
|
||||||
|
- conda-forge
|
||||||
|
- main
|
||||||
dependencies:
|
dependencies:
|
||||||
# The python interpreter version.
|
# The python interpreter version.
|
||||||
# Currently Azure ML only supports 3.5.2 and later.
|
# Currently Azure ML only supports 3.7.0 and later.
|
||||||
- pip<=19.3.1
|
- pip<=20.2.4
|
||||||
- nomkl
|
- nomkl
|
||||||
- python>=3.5.2,<3.8
|
- python>=3.7.0,<3.11
|
||||||
- nb_conda
|
- urllib3==1.26.7
|
||||||
- cython
|
- PyJWT < 2.0.0
|
||||||
- urllib3<1.24
|
- numpy>=1.21.6,<=1.22.3
|
||||||
|
|
||||||
- pip:
|
- pip:
|
||||||
# Required packages for AzureML execution, history, and data preparation.
|
# Required packages for AzureML execution, history, and data preparation.
|
||||||
|
- azure-core==1.24.1
|
||||||
|
- azure-identity==1.7.0
|
||||||
- azureml-defaults
|
- azureml-defaults
|
||||||
- azureml-sdk
|
- azureml-sdk
|
||||||
- azureml-widgets
|
- azureml-widgets
|
||||||
|
- azureml-mlflow
|
||||||
- pandas
|
- pandas
|
||||||
- PyJWT < 2.0.0
|
- mlflow
|
||||||
|
|||||||
@@ -0,0 +1,420 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Automated Machine Learning\n",
|
||||||
|
"_**Classification of credit card fraudulent transactions on local managed compute **_\n",
|
||||||
|
"\n",
|
||||||
|
"## Contents\n",
|
||||||
|
"1. [Introduction](#Introduction)\n",
|
||||||
|
"1. [Setup](#Setup)\n",
|
||||||
|
"1. [Train](#Train)\n",
|
||||||
|
"1. [Results](#Results)\n",
|
||||||
|
"1. [Test](#Test)\n",
|
||||||
|
"1. [Acknowledgements](#Acknowledgements)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Introduction\n",
|
||||||
|
"\n",
|
||||||
|
"In this example we use the associated credit card dataset to showcase how you can use AutoML for a simple classification problem. The goal is to predict if a credit card transaction is considered a fraudulent charge.\n",
|
||||||
|
"\n",
|
||||||
|
"This notebook is using local managed compute to train the model.\n",
|
||||||
|
"\n",
|
||||||
|
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
|
||||||
|
"\n",
|
||||||
|
"In this notebook you will learn how to:\n",
|
||||||
|
"1. Create an experiment using an existing workspace.\n",
|
||||||
|
"2. Configure AutoML using `AutoMLConfig`.\n",
|
||||||
|
"3. Train the model using local managed compute.\n",
|
||||||
|
"4. Explore the results.\n",
|
||||||
|
"5. Test the fitted model."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Setup\n",
|
||||||
|
"\n",
|
||||||
|
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import logging\n",
|
||||||
|
"\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"\n",
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core.compute_target import LocalTarget\n",
|
||||||
|
"from azureml.core.experiment import Experiment\n",
|
||||||
|
"from azureml.core.workspace import Workspace\n",
|
||||||
|
"from azureml.core.dataset import Dataset\n",
|
||||||
|
"from azureml.train.automl import AutoMLConfig"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(\"This notebook was created using version 1.55.0 of the Azure ML SDK\")\n",
|
||||||
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"\n",
|
||||||
|
"# choose a name for experiment\n",
|
||||||
|
"experiment_name = 'automl-local-managed'\n",
|
||||||
|
"\n",
|
||||||
|
"experiment=Experiment(ws, experiment_name)\n",
|
||||||
|
"\n",
|
||||||
|
"output = {}\n",
|
||||||
|
"output['Subscription ID'] = ws.subscription_id\n",
|
||||||
|
"output['Workspace'] = ws.name\n",
|
||||||
|
"output['Resource Group'] = ws.resource_group\n",
|
||||||
|
"output['Location'] = ws.location\n",
|
||||||
|
"output['Experiment Name'] = experiment.name\n",
|
||||||
|
"pd.set_option('display.max_colwidth', None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||||
|
"outputDf.T"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Determine if local docker is configured for Linux images\n",
|
||||||
|
"\n",
|
||||||
|
"Local managed runs will leverage a Linux docker container to submit the run to. Due to this, the docker needs to be configured to use Linux containers."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Check if Docker is installed and Linux containers are enabled\n",
|
||||||
|
"import subprocess\n",
|
||||||
|
"from subprocess import CalledProcessError\n",
|
||||||
|
"try:\n",
|
||||||
|
" assert subprocess.run(\"docker -v\", shell=True).returncode == 0, 'Local Managed runs require docker to be installed.'\n",
|
||||||
|
" out = subprocess.check_output(\"docker system info\", shell=True).decode('ascii')\n",
|
||||||
|
" assert \"OSType: linux\" in out, 'Docker engine needs to be configured to use Linux containers.' \\\n",
|
||||||
|
" 'https://docs.docker.com/docker-for-windows/#switch-between-windows-and-linux-containers'\n",
|
||||||
|
"except CalledProcessError as ex:\n",
|
||||||
|
" raise Exception('Local Managed runs require docker to be installed.') from ex"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Load Data\n",
|
||||||
|
"\n",
|
||||||
|
"Load the credit card dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
|
||||||
|
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
|
||||||
|
"training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
|
||||||
|
"label_column_name = 'Class'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Train\n",
|
||||||
|
"\n",
|
||||||
|
"Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.\n",
|
||||||
|
"\n",
|
||||||
|
"|Property|Description|\n",
|
||||||
|
"|-|-|\n",
|
||||||
|
"|**task**|classification or regression|\n",
|
||||||
|
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
|
||||||
|
"|**enable_early_stopping**|Stop the run if the metric score is not showing improvement.|\n",
|
||||||
|
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
||||||
|
"|**training_data**|Input dataset, containing both features and label column.|\n",
|
||||||
|
"|**label_column_name**|The name of the label column.|\n",
|
||||||
|
"|**enable_local_managed**|Enable the experimental local-managed scenario.|\n",
|
||||||
|
"\n",
|
||||||
|
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"automl_settings = {\n",
|
||||||
|
" \"n_cross_validations\": 3,\n",
|
||||||
|
" \"primary_metric\": 'average_precision_score_weighted',\n",
|
||||||
|
" \"enable_early_stopping\": True,\n",
|
||||||
|
" \"experiment_timeout_hours\": 0.3, #for real scenarios we recommend a timeout of at least one hour \n",
|
||||||
|
" \"verbosity\": logging.INFO,\n",
|
||||||
|
"}\n",
|
||||||
|
"\n",
|
||||||
|
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||||
|
" debug_log = 'automl_errors.log',\n",
|
||||||
|
" compute_target = LocalTarget(),\n",
|
||||||
|
" enable_local_managed = True,\n",
|
||||||
|
" training_data = training_data,\n",
|
||||||
|
" label_column_name = label_column_name,\n",
|
||||||
|
" **automl_settings\n",
|
||||||
|
" )"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"parent_run = experiment.submit(automl_config, show_output = True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# If you need to retrieve a run that already started, use the following code\n",
|
||||||
|
"#from azureml.train.automl.run import AutoMLRun\n",
|
||||||
|
"#parent_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"parent_run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Results"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Explain model\n",
|
||||||
|
"\n",
|
||||||
|
"Automated ML models can be explained and visualized using the SDK Explainability library. "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Analyze results\n",
|
||||||
|
"\n",
|
||||||
|
"### Retrieve the Best Child Run\n",
|
||||||
|
"\n",
|
||||||
|
"Below we select the best pipeline from our iterations. The `get_best_child` method returns the best run. Overloads on `get_best_child` allow you to retrieve the best run for *any* logged metric."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"best_run = parent_run.get_best_child()\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Test the fitted model\n",
|
||||||
|
"\n",
|
||||||
|
"Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"X_test_df = validation_data.drop_columns(columns=[label_column_name])\n",
|
||||||
|
"y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Creating ModelProxy for submitting prediction runs to the training environment.\n",
|
||||||
|
"We will create a ModelProxy for the best child run, which will allow us to submit a run that does the prediction in the training environment. Unlike the local client, which can have different versions of some libraries, the training environment will have all the compatible libraries for the model already."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.train.automl.model_proxy import ModelProxy\n",
|
||||||
|
"best_model_proxy = ModelProxy(best_run)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# call the predict functions on the model proxy\n",
|
||||||
|
"y_pred = best_model_proxy.predict(X_test_df).to_pandas_dataframe()\n",
|
||||||
|
"y_pred"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Acknowledgements"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Universit\u00c3\u0192\u00c2\u00a9 Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available on https://www.researchgate.net and the page of the DefeatFraud project\n",
|
||||||
|
"Please cite the following works: \n",
|
||||||
|
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tAndrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n",
|
||||||
|
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tDal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon\n",
|
||||||
|
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tDal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\n",
|
||||||
|
"o\tDal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\n",
|
||||||
|
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tCarcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-A\u00c3\u0192\u00c2\u00abl; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\n",
|
||||||
|
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tCarcillo, Fabrizio; Le Borgne, Yann-A\u00c3\u0192\u00c2\u00abl; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "sekrupa"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"category": "tutorial",
|
||||||
|
"compute": [
|
||||||
|
"AML Compute"
|
||||||
|
],
|
||||||
|
"datasets": [
|
||||||
|
"Creditcard"
|
||||||
|
],
|
||||||
|
"deployment": [
|
||||||
|
"None"
|
||||||
|
],
|
||||||
|
"exclude_from_index": false,
|
||||||
|
"file_extension": ".py",
|
||||||
|
"framework": [
|
||||||
|
"None"
|
||||||
|
],
|
||||||
|
"friendly_name": "Classification of credit card fraudulent transactions using Automated ML",
|
||||||
|
"index_order": 5,
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.8 - AzureML",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python38-azureml"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.6.7"
|
||||||
|
},
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"tags": [
|
||||||
|
"AutomatedML"
|
||||||
|
],
|
||||||
|
"task": "Classification",
|
||||||
|
"version": "3.6.7"
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
|
}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: auto-ml-classification-credit-card-fraud-local-managed
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -39,6 +39,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"## Introduction\n",
|
"## Introduction\n",
|
||||||
"In this example we use an experimental feature, Model Proxy, to do a predict on the best generated model without downloading the model locally. The prediction will happen on same compute and environment that was used to train the model. This feature is currently in the experimental state, which means that the API is prone to changing, please make sure to run on the latest version of this notebook if you face any issues.\n",
|
"In this example we use an experimental feature, Model Proxy, to do a predict on the best generated model without downloading the model locally. The prediction will happen on same compute and environment that was used to train the model. This feature is currently in the experimental state, which means that the API is prone to changing, please make sure to run on the latest version of this notebook if you face any issues.\n",
|
||||||
|
"This notebook will also leverage MLFlow for saving models, allowing for more portability of the resulting models. See https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-mlflow for more details around MLFlow in AzureML.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
|
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -90,7 +91,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
|
"print(\"This notebook was created using version 1.55.0 of the Azure ML SDK\")\n",
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -142,7 +143,7 @@
|
|||||||
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||||
" print('Found existing cluster, use it.')\n",
|
" print('Found existing cluster, use it.')\n",
|
||||||
"except ComputeTargetException:\n",
|
"except ComputeTargetException:\n",
|
||||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
|
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
||||||
" max_nodes=4)\n",
|
" max_nodes=4)\n",
|
||||||
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -179,6 +180,29 @@
|
|||||||
"label = \"ERP\"\n"
|
"label = \"ERP\"\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The split data will be used in the remote compute by ModelProxy and locally to compare results.\n",
|
||||||
|
"So, we need to persist the split data to avoid discrepancies from different package versions in the local and remote environments."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ds = ws.get_default_datastore()\n",
|
||||||
|
"\n",
|
||||||
|
"train_data = Dataset.Tabular.register_pandas_dataframe(\n",
|
||||||
|
" train_data.to_pandas_dataframe(), target=(ds, \"machineTrainData\"), name=\"train_data\")\n",
|
||||||
|
"\n",
|
||||||
|
"test_data = Dataset.Tabular.register_pandas_dataframe(\n",
|
||||||
|
" test_data.to_pandas_dataframe(), target=(ds, \"machineTestData\"), name=\"test_data\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -194,7 +218,6 @@
|
|||||||
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
||||||
"|**training_data**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
"|**training_data**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||||
"|**label_column_name**|(sparse) array-like, shape = [n_samples, ], targets values.|\n",
|
"|**label_column_name**|(sparse) array-like, shape = [n_samples, ], targets values.|\n",
|
||||||
"|**scenario**|We need to set this parameter to 'Latest' to enable some experimental features. This parameter should not be set outside of this experimental notebook.|\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
|
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
|
||||||
]
|
]
|
||||||
@@ -213,17 +236,17 @@
|
|||||||
" \"n_cross_validations\": 3,\n",
|
" \"n_cross_validations\": 3,\n",
|
||||||
" \"primary_metric\": 'r2_score',\n",
|
" \"primary_metric\": 'r2_score',\n",
|
||||||
" \"enable_early_stopping\": True, \n",
|
" \"enable_early_stopping\": True, \n",
|
||||||
" \"experiment_timeout_hours\": 0.3, #for real scenarios we reccommend a timeout of at least one hour \n",
|
" \"experiment_timeout_hours\": 0.3, #for real scenarios we recommend a timeout of at least one hour \n",
|
||||||
" \"max_concurrent_iterations\": 4,\n",
|
" \"max_concurrent_iterations\": 4,\n",
|
||||||
" \"max_cores_per_iteration\": -1,\n",
|
" \"max_cores_per_iteration\": -1,\n",
|
||||||
" \"verbosity\": logging.INFO,\n",
|
" \"verbosity\": logging.INFO,\n",
|
||||||
|
" \"save_mlflow\": True,\n",
|
||||||
"}\n",
|
"}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"automl_config = AutoMLConfig(task = 'regression',\n",
|
"automl_config = AutoMLConfig(task = 'regression',\n",
|
||||||
" compute_target = compute_target,\n",
|
" compute_target = compute_target,\n",
|
||||||
" training_data = train_data,\n",
|
" training_data = train_data,\n",
|
||||||
" label_column_name = label,\n",
|
" label_column_name = label,\n",
|
||||||
" scenario='Latest',\n",
|
|
||||||
" **automl_settings\n",
|
" **automl_settings\n",
|
||||||
" )"
|
" )"
|
||||||
]
|
]
|
||||||
@@ -304,7 +327,8 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"#### Show hyperparameters\n",
|
"#### Show hyperparameters\n",
|
||||||
"Show the model pipeline used for the best run with its hyperparameters."
|
"Show the model pipeline used for the best run with its hyperparameters.\n",
|
||||||
|
"For ensemble pipelines it shows the iterations and algorithms that are ensembled."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -313,8 +337,19 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"run_properties = json.loads(best_run.get_details()['properties']['pipeline_script'])\n",
|
"run_properties = best_run.get_details()['properties']\n",
|
||||||
"print(json.dumps(run_properties, indent = 1)) "
|
"pipeline_script = json.loads(run_properties['pipeline_script'])\n",
|
||||||
|
"print(json.dumps(pipeline_script, indent = 1)) \n",
|
||||||
|
"\n",
|
||||||
|
"if 'ensembled_iterations' in run_properties:\n",
|
||||||
|
" print(\"\")\n",
|
||||||
|
" print(\"Ensembled Iterations\")\n",
|
||||||
|
" print(run_properties['ensembled_iterations'])\n",
|
||||||
|
" \n",
|
||||||
|
"if 'ensembled_algorithms' in run_properties:\n",
|
||||||
|
" print(\"\")\n",
|
||||||
|
" print(\"Ensembled Algorithms\")\n",
|
||||||
|
" print(run_properties['ensembled_algorithms'])"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -413,9 +448,9 @@
|
|||||||
"automated-machine-learning"
|
"automated-machine-learning"
|
||||||
],
|
],
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3.6",
|
"display_name": "Python 3.8 - AzureML",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python36"
|
"name": "python38-azureml"
|
||||||
},
|
},
|
||||||
"language_info": {
|
"language_info": {
|
||||||
"codemirror_mode": {
|
"codemirror_mode": {
|
||||||
|
|||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: auto-ml-regression-model-proxy
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
|
After Width: | Height: | Size: 22 KiB |
@@ -0,0 +1,174 @@
|
|||||||
|
from typing import Any, Dict, Optional, List
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
|
||||||
|
from matplotlib import pyplot as plt
|
||||||
|
from matplotlib.backends.backend_pdf import PdfPages
|
||||||
|
|
||||||
|
from azureml.automl.core.shared import constants
|
||||||
|
from azureml.automl.core.shared.types import GrainType
|
||||||
|
from azureml.automl.runtime.shared.score import scoring
|
||||||
|
|
||||||
|
GRAIN = "time_series_id"
|
||||||
|
BACKTEST_ITER = "backtest_iteration"
|
||||||
|
ACTUALS = "actual_level"
|
||||||
|
PREDICTIONS = "predicted_level"
|
||||||
|
ALL_GRAINS = "all_sets"
|
||||||
|
|
||||||
|
FORECASTS_FILE = "forecast.csv"
|
||||||
|
SCORES_FILE = "scores.csv"
|
||||||
|
PLOTS_FILE = "plots_fcst_vs_actual.pdf"
|
||||||
|
RE_INVALID_SYMBOLS = re.compile("[: ]")
|
||||||
|
|
||||||
|
|
||||||
|
def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
    """
    Score one forecast data frame against its actuals.

    :param df: The data frame holding the actual_level and predicted_level columns.
    :param metrics: The names of the regression metrics to compute.
    :return: The data frame with two columns - metric_name and metric.
    """
    score_dict = scoring.score_regression(
        y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
    )
    result = pd.DataFrame(list(score_dict.items()), columns=["metric_name", "metric"])
    # Return rows ordered by metric name with a clean 0..n-1 index.
    return result.sort_values(["metric_name"]).reset_index(drop=True)
|
||||||
|
|
||||||
|
|
||||||
|
def _format_grain_name(grain: GrainType) -> str:
|
||||||
|
"""
|
||||||
|
Convert grain name to string.
|
||||||
|
|
||||||
|
:param grain: the grain name.
|
||||||
|
:return: the string representation of the given grain.
|
||||||
|
"""
|
||||||
|
if not isinstance(grain, tuple) and not isinstance(grain, list):
|
||||||
|
return str(grain)
|
||||||
|
grain = list(map(str, grain))
|
||||||
|
return "|".join(grain)
|
||||||
|
|
||||||
|
|
||||||
|
def compute_all_metrics(
    fcst_df: pd.DataFrame,
    ts_id_colnames: List[str],
    metric_names: Optional[List[str]] = None,
):
    """
    Calculate regression metrics per grain, plus one overall set of metrics.

    :param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'
    :param ts_id_colnames: (optional) list of grain column names; may be None or empty,
                           in which case only the overall metrics are computed.
    :param metric_names: (optional) the list of metric names to return; defaults to the
                         scalar regression metric set.
    :return: a data frame with columns 'metric_name', 'metric' and the grain column -
             one group of rows per grain and one group for all grains combined.
    """
    if not metric_names:
        metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)

    if ts_id_colnames is None:
        ts_id_colnames = []

    metrics_list = []
    if ts_id_colnames:
        for grain, df in fcst_df.groupby(ts_id_colnames):
            one_grain_metrics_df = _compute_metrics(df, metric_names)
            one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
            metrics_list.append(one_grain_metrics_df)

    # Overall metrics across every grain, labelled with the ALL_GRAINS marker.
    one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
    one_grain_metrics_df[GRAIN] = ALL_GRAINS
    metrics_list.append(one_grain_metrics_df)

    # Collect the per-grain tables into a single data frame.
    return pd.concat(metrics_list)
|
||||||
|
|
||||||
|
|
||||||
|
def _draw_one_plot(
    df: pd.DataFrame,
    time_column_name: str,
    grain_column_names: List[str],
    pdf: PdfPages,
) -> None:
    """
    Render actuals vs. forecast for one series/iteration onto a new PDF page.

    :param df: The data frame with the data to build plot.
    :param time_column_name: The name of a time column.
    :param grain_column_names: The name of grain columns.
    :param pdf: The pdf backend used to render the plot.
    """
    fig, _ = plt.subplots(figsize=(20, 10))
    indexed = df.set_index(time_column_name)
    plt.plot(indexed[[ACTUALS, PREDICTIONS]])
    plt.xticks(rotation=45)
    iter_label = indexed[BACKTEST_ITER].iloc[0]
    if grain_column_names:
        id_values = [indexed[name].iloc[0] for name in grain_column_names]
        plt.title(f"Time series ID: {_format_grain_name(id_values)} {iter_label}")
    plt.legend(["actual", "forecast"])
    # NOTE(review): the figure is closed before being written; pdf.savefig
    # still renders the retained Figure object. Order kept from the original.
    plt.close(fig)
    pdf.savefig(fig)
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_scores_and_build_plots(
    input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
):
    """
    Aggregate backtest forecasts, plot them, and score them.

    Reads every CSV file in input_dir and writes three artifacts to output_dir:
    plots_fcst_vs_actual.pdf (one page per series per backtest iteration),
    forecast.csv (the concatenated forecasts) and scores.csv (regression
    metrics per series/iteration plus one overall row).

    :param input_dir: The directory containing the forecast CSV files.
    :param output_dir: The directory to write outputs to; created if absent.
    :param automl_settings: The AutoML settings dictionary; consulted for the
                            time column name and the time series ID column names.
    """
    os.makedirs(output_dir, exist_ok=True)
    grains = automl_settings.get(
        constants.TimeSeries.TIME_SERIES_ID_COLUMN_NAMES,
        automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES, None),
    )
    time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
    if grains is None:
        grains = []
    if isinstance(grains, str):
        grains = [grains]
    # Drop the iteration marker via a filtered copy so the list stored in the
    # caller's automl_settings is never mutated.
    grains = [grain for grain in grains if grain != BACKTEST_ITER]

    dfs = []
    for file_name in os.listdir(input_dir):
        file_path = os.path.join(input_dir, file_name)
        if os.path.isfile(file_path) and file_path.endswith(".csv"):
            df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
            for _, iteration in df_iter.groupby(BACKTEST_ITER):
                dfs.append(iteration)
    forecast_df = pd.concat(dfs, sort=False, ignore_index=True)
    # To make sure plots are in order, sort the predictions by grain and iteration.
    ts_index = grains + [BACKTEST_ITER]
    forecast_df.sort_values(by=ts_index, inplace=True)
    # The context manager finalizes the PDF even if a plot raises.
    with PdfPages(os.path.join(output_dir, PLOTS_FILE)) as pdf:
        for _, one_forecast in forecast_df.groupby(ts_index):
            _draw_one_plot(one_forecast, time_column_name, grains, pdf)
    forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
    # Remove np.NaN and np.inf from the prediction and actuals data before scoring.
    forecast_df.replace([np.inf, -np.inf], np.nan, inplace=True)
    forecast_df.dropna(subset=[ACTUALS, PREDICTIONS], inplace=True)
    metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
    metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Command-line entry point: read forecast CSVs, emit plots and scores.
    parser = argparse.ArgumentParser("Parsing input arguments.")
    parser.add_argument("--forecasts", dest="forecasts", required=True)
    parser.add_argument("--output-dir", dest="scores_out", required=True)
    parsed_args, _ = parser.parse_known_args()

    # The AutoML settings file is expected to sit next to this script.
    settings_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
    )
    with open(settings_path) as json_file:
        automl_settings = json.load(json_file)

    calculate_scores_and_build_plots(
        parsed_args.forecasts, parsed_args.scores_out, automl_settings
    )
|
||||||
@@ -0,0 +1,779 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Many Models with Backtesting - Automated ML\n",
|
||||||
|
"**_Backtest many models time series forecasts with Automated Machine Learning_**\n",
|
||||||
|
"\n",
|
||||||
|
"---"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"For this notebook we are using a synthetic dataset to demonstrate backtesting in the many models scenario. This allows us to check the historical performance of AutoML on historical data. To do that, we step back by the backtesting period several times and split the data into train and test sets. Then these data sets are used for training and evaluation of the model.<br>\n",
|
||||||
|
"\n",
|
||||||
|
"Thus, it is a quick way of evaluating AutoML as if it was in production. Here, we do not test historical performance of a particular model, for this see the [notebook](../forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb). Instead, the best model for every backtest iteration can be different since AutoML chooses the best model for a given training set.\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Prerequisites\n",
|
||||||
|
"You'll need to create a compute Instance by following [these](https://learn.microsoft.com/en-us/azure/machine-learning/v1/how-to-create-manage-compute-instance?tabs=python) instructions."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 1.0 Set up workspace, datastore, experiment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1613003526897
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os\n",
|
||||||
|
"\n",
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core import Workspace, Datastore\n",
|
||||||
|
"import numpy as np\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"\n",
|
||||||
|
"from pandas.tseries.frequencies import to_offset\n",
|
||||||
|
"\n",
|
||||||
|
"# Set up your workspace\n",
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"ws.get_details()\n",
|
||||||
|
"\n",
|
||||||
|
"# Set up your datastores\n",
|
||||||
|
"dstore = ws.get_default_datastore()\n",
|
||||||
|
"\n",
|
||||||
|
"output = {}\n",
|
||||||
|
"output[\"SDK version\"] = azureml.core.VERSION\n",
|
||||||
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
|
"output[\"Workspace\"] = ws.name\n",
|
||||||
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
|
"output[\"Location\"] = ws.location\n",
|
||||||
|
"output[\"Default datastore name\"] = dstore.name\n",
|
||||||
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
|
"outputDf.T"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"This notebook is compatible with Azure ML SDK version 1.35.1 or later."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Choose an experiment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1613003540729
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core import Experiment\n",
|
||||||
|
"\n",
|
||||||
|
"experiment = Experiment(ws, \"automl-many-models-backtest\")\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"Experiment name: \" + experiment.name)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 2.0 Data\n",
|
||||||
|
"\n",
|
||||||
|
"#### 2.1 Data generation\n",
|
||||||
|
"For this notebook we will generate the artificial data set with two [time series IDs](https://docs.microsoft.com/en-us/python/api/azureml-automl-core/azureml.automl.core.forecasting_parameters.forecastingparameters?view=azure-ml-py). Then we will generate backtest folds and will upload it to the default BLOB storage and create a [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# simulate data: 2 grains - 700\n",
|
||||||
|
"TIME_COLNAME = \"date\"\n",
|
||||||
|
"TARGET_COLNAME = \"value\"\n",
|
||||||
|
"TIME_SERIES_ID_COLNAME = \"ts_id\"\n",
|
||||||
|
"\n",
|
||||||
|
"sample_size = 700\n",
|
||||||
|
"# Set the random seed for reproducibility of results.\n",
|
||||||
|
"np.random.seed(20)\n",
|
||||||
|
"X1 = pd.DataFrame(\n",
|
||||||
|
" {\n",
|
||||||
|
" TIME_COLNAME: pd.date_range(start=\"2018-01-01\", periods=sample_size),\n",
|
||||||
|
" TARGET_COLNAME: np.random.normal(loc=100, scale=20, size=sample_size),\n",
|
||||||
|
" TIME_SERIES_ID_COLNAME: \"ts_A\",\n",
|
||||||
|
" }\n",
|
||||||
|
")\n",
|
||||||
|
"X2 = pd.DataFrame(\n",
|
||||||
|
" {\n",
|
||||||
|
" TIME_COLNAME: pd.date_range(start=\"2018-01-01\", periods=sample_size),\n",
|
||||||
|
" TARGET_COLNAME: np.random.normal(loc=100, scale=20, size=sample_size),\n",
|
||||||
|
" TIME_SERIES_ID_COLNAME: \"ts_B\",\n",
|
||||||
|
" }\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"X = pd.concat([X1, X2], ignore_index=True, sort=False)\n",
|
||||||
|
"print(\"Simulated dataset contains {} rows \\n\".format(X.shape[0]))\n",
|
||||||
|
"X.head()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Now we will generate 8 backtesting folds with backtesting period of 7 days and with the same forecasting horizon. We will add the column \"backtest_iteration\", which will identify the backtesting period by the last training date."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"offset_type = \"7D\"\n",
|
||||||
|
"NUMBER_OF_BACKTESTS = 8 # number of train/test sets to generate\n",
|
||||||
|
"\n",
|
||||||
|
"dfs_train = []\n",
|
||||||
|
"dfs_test = []\n",
|
||||||
|
"for ts_id, df_one in X.groupby(TIME_SERIES_ID_COLNAME):\n",
|
||||||
|
"\n",
|
||||||
|
" data_end = df_one[TIME_COLNAME].max()\n",
|
||||||
|
"\n",
|
||||||
|
" for i in range(NUMBER_OF_BACKTESTS):\n",
|
||||||
|
" train_cutoff_date = data_end - to_offset(offset_type)\n",
|
||||||
|
" df_one = df_one.copy()\n",
|
||||||
|
" df_one[\"backtest_iteration\"] = \"iteration_\" + str(train_cutoff_date)\n",
|
||||||
|
" train = df_one[df_one[TIME_COLNAME] <= train_cutoff_date]\n",
|
||||||
|
" test = df_one[\n",
|
||||||
|
" (df_one[TIME_COLNAME] > train_cutoff_date)\n",
|
||||||
|
" & (df_one[TIME_COLNAME] <= data_end)\n",
|
||||||
|
" ]\n",
|
||||||
|
" data_end = train[TIME_COLNAME].max()\n",
|
||||||
|
" dfs_train.append(train)\n",
|
||||||
|
" dfs_test.append(test)\n",
|
||||||
|
"\n",
|
||||||
|
"X_train = pd.concat(dfs_train, sort=False, ignore_index=True)\n",
|
||||||
|
"X_test = pd.concat(dfs_test, sort=False, ignore_index=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### 2.2 Create the Tabular Data Set.\n",
|
||||||
|
"\n",
|
||||||
|
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
|
||||||
|
"\n",
|
||||||
|
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.\n",
|
||||||
|
"\n",
|
||||||
|
"In this next step, we will upload the data and create a TabularDataset."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||||
|
"\n",
|
||||||
|
"ds = ws.get_default_datastore()\n",
|
||||||
|
"# Upload saved data to the default data store.\n",
|
||||||
|
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||||
|
" X_train, target=(ds, \"data_mm\"), name=\"data_train\"\n",
|
||||||
|
")\n",
|
||||||
|
"test_data = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||||
|
" X_test, target=(ds, \"data_mm\"), name=\"data_test\"\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 3.0 Build the training pipeline\n",
|
||||||
|
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Choose a compute target\n",
|
||||||
|
"\n",
|
||||||
|
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
|
||||||
|
"\n",
|
||||||
|
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1613007037308
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
|
"\n",
|
||||||
|
"# Name your cluster\n",
|
||||||
|
"compute_name = \"backtest-mm\"\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"if compute_name in ws.compute_targets:\n",
|
||||||
|
" compute_target = ws.compute_targets[compute_name]\n",
|
||||||
|
" if compute_target and type(compute_target) is AmlCompute:\n",
|
||||||
|
" print(\"Found compute target: \" + compute_name)\n",
|
||||||
|
"else:\n",
|
||||||
|
" print(\"Creating a new compute target...\")\n",
|
||||||
|
" provisioning_config = AmlCompute.provisioning_configuration(\n",
|
||||||
|
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
|
||||||
|
" )\n",
|
||||||
|
" # Create the compute target\n",
|
||||||
|
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
|
||||||
|
"\n",
|
||||||
|
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
|
||||||
|
" # If no min node count is provided it will use the scale settings for the cluster\n",
|
||||||
|
" compute_target.wait_for_completion(\n",
|
||||||
|
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
|
||||||
|
" )\n",
|
||||||
|
"\n",
|
||||||
|
" # For a more detailed view of current cluster status, use the 'status' property\n",
|
||||||
|
" print(compute_target.status.serialize())"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Set up training parameters\n",
|
||||||
|
"\n",
|
||||||
|
"We need to provide ``ForecastingParameters``, ``AutoMLConfig`` and ``ManyModelsTrainParameters`` objects. For the forecasting task we also need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name(s) definition.\n",
|
||||||
|
"\n",
|
||||||
|
"#### ``ForecastingParameters`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
||||||
|
"| **time_column_name** | The name of your time column. |\n",
|
||||||
|
"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
|
||||||
|
"| **cv_step_size** | Number of periods between two consecutive cross-validation folds. The default value is \\\"auto\\\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value. |\n",
|
||||||
|
"\n",
|
||||||
|
"#### ``AutoMLConfig`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **task** | forecasting |\n",
|
||||||
|
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
|
||||||
|
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
|
||||||
|
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||||
|
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||||
|
"| **experiment_timeout_hours** | Maximum amount of time in hours that each experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. **It does not control the overall timeout for the pipeline run, instead controls the timeout for each training run per partitioned time series.** |\n",
|
||||||
|
"| **label_column_name** | The name of the label column. |\n",
|
||||||
|
"| **n_cross_validations** | Number of cross validation splits. The default value is \\\"auto\\\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
||||||
|
"| **enable_early_stopping** | Flag to enable early termination if the primary metric is no longer improving. |\n",
|
||||||
|
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
|
||||||
|
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
|
||||||
|
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"#### ``ManyModelsTrainParameters`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **automl_settings** | The ``AutoMLConfig`` object defined above. |\n",
|
||||||
|
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1613007061544
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
|
||||||
|
" ManyModelsTrainParameters,\n",
|
||||||
|
")\n",
|
||||||
|
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||||
|
"from azureml.train.automl.automlconfig import AutoMLConfig\n",
|
||||||
|
"\n",
|
||||||
|
"partition_column_names = [TIME_SERIES_ID_COLNAME, \"backtest_iteration\"]\n",
|
||||||
|
"\n",
|
||||||
|
"forecasting_parameters = ForecastingParameters(\n",
|
||||||
|
" time_column_name=TIME_COLNAME,\n",
|
||||||
|
" forecast_horizon=6,\n",
|
||||||
|
" time_series_id_column_names=partition_column_names,\n",
|
||||||
|
" cv_step_size=\"auto\",\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"automl_settings = AutoMLConfig(\n",
|
||||||
|
" task=\"forecasting\",\n",
|
||||||
|
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||||
|
" iteration_timeout_minutes=10,\n",
|
||||||
|
" iterations=15,\n",
|
||||||
|
" experiment_timeout_hours=0.25,\n",
|
||||||
|
" label_column_name=TARGET_COLNAME,\n",
|
||||||
|
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||||
|
" track_child_runs=False,\n",
|
||||||
|
" forecasting_parameters=forecasting_parameters,\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"mm_paramters = ManyModelsTrainParameters(\n",
|
||||||
|
" automl_settings=automl_settings, partition_column_names=partition_column_names\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Set up many models pipeline"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
|
||||||
|
"\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **experiment** | The experiment used for training. |\n",
|
||||||
|
"| **train_data** | The file dataset to be used as input to the training run. |\n",
|
||||||
|
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
|
||||||
|
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. |\n",
|
||||||
|
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
|
||||||
|
"| **run_invocation_timeout** | Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. This must be greater than ``experiment_timeout_hours`` by at least 300 seconds. |\n",
|
||||||
|
"\n",
|
||||||
|
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.\n",
|
||||||
|
"\n",
|
||||||
|
"**Note**: Total time taken for the **training step** in the pipeline to complete = $ \\frac{t}{ p \\times n } \\times ts $\n",
|
||||||
|
"where,\n",
|
||||||
|
"- $ t $ is time taken for training one partition (can be viewed in the training logs)\n",
|
||||||
|
"- $ p $ is ``process_count_per_node``\n",
|
||||||
|
"- $ n $ is ``node_count``\n",
|
||||||
|
"- $ ts $ is total number of partitions in time series based on ``partition_column_names``"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
|
||||||
|
" experiment=experiment,\n",
|
||||||
|
" train_data=train_data,\n",
|
||||||
|
" compute_target=compute_target,\n",
|
||||||
|
" node_count=2,\n",
|
||||||
|
" process_count_per_node=2,\n",
|
||||||
|
" run_invocation_timeout=1200,\n",
|
||||||
|
" train_pipeline_parameters=mm_paramters,\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.pipeline.core import Pipeline\n",
|
||||||
|
"\n",
|
||||||
|
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Submit the pipeline to run\n",
|
||||||
|
"Next we submit our pipeline to run. The whole training pipeline takes about 20 minutes using a STANDARD_DS12_V2 VM with our current ParallelRunConfig setting."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"training_run = experiment.submit(training_pipeline)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"training_run.wait_for_completion(show_output=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Check the run status, if training_run is in completed state, continue to next section. Otherwise, check the portal for failures."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 4.0 Backtesting\n",
|
||||||
|
"Now that we selected the best AutoML model for each backtest fold, we will use these models to generate the forecasts and compare with the actuals."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Set up output dataset for inference data\n",
|
||||||
|
"Output of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset. "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.data import OutputFileDatasetConfig\n",
|
||||||
|
"\n",
|
||||||
|
"output_inference_data_ds = OutputFileDatasetConfig(\n",
|
||||||
|
" name=\"many_models_inference_output\",\n",
|
||||||
|
" destination=(dstore, \"backtesting/inference_data/\"),\n",
|
||||||
|
").register_on_complete(name=\"backtesting_data_ds\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"For many models we need to provide the ManyModelsInferenceParameters object.\n",
|
||||||
|
"\n",
|
||||||
|
"#### ``ManyModelsInferenceParameters`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **partition_column_names** | List of column names that identifies groups. |\n",
|
||||||
|
"| **target_column_name** | \\[Optional] Column name only if the inference dataset has the target. |\n",
|
||||||
|
"| **time_column_name** | \\[Optional] Time column name only if it is timeseries. |\n",
|
||||||
|
"| **inference_type** | \\[Optional] Which inference method to use on the model. Possible values are 'forecast', 'predict_proba', and 'predict'. |\n",
|
||||||
|
"| **forecast_mode** | \\[Optional] The type of forecast to be used, either 'rolling' or 'recursive'; defaults to 'recursive'. |\n",
|
||||||
|
"| **step** | \\[Optional] Number of periods to advance the forecasting window in each iteration **(for rolling forecast only)**; defaults to 1. |\n",
|
||||||
|
"\n",
|
||||||
|
"#### ``get_many_models_batch_inference_steps`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **experiment** | The experiment used for inference run. |\n",
|
||||||
|
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
|
||||||
|
"| **compute_target** | The compute target that runs the inference pipeline. |\n",
|
||||||
|
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
|
||||||
|
"| **process_count_per_node** | \\[Optional] The number of processes per node. By default it's 2 (should be at most half of the number of cores in a single node of the compute cluster that will be used for the experiment).\n",
|
||||||
|
"| **inference_pipeline_parameters** | \\[Optional] The ``ManyModelsInferenceParameters`` object defined above. |\n",
|
||||||
|
"| **append_row_file_name** | \\[Optional] The name of the output file (optional, default value is 'parallel_run_step.txt'). Supports 'txt' and 'csv' file extension. A 'txt' file extension generates the output in 'txt' format with space as separator without column names. A 'csv' file extension generates the output in 'csv' format with comma as separator and with column names. |\n",
|
||||||
|
"| **train_run_id** | \\[Optional] The run id of the **training pipeline**. By default it is the latest successful training pipeline run in the experiment. |\n",
|
||||||
|
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
|
||||||
|
"| **run_invocation_timeout** | \\[Optional] Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||||
|
"| **output_datastore** | \\[Optional] The ``Datastore`` or ``OutputDatasetConfig`` to be used for output. If specified any pipeline output will be written to that location. If unspecified the default datastore will be used. |\n",
|
||||||
|
"| **arguments** | \\[Optional] Arguments to be passed to inference script. Possible argument is '--forecast_quantiles' followed by quantile values. |"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||||
|
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
|
||||||
|
" ManyModelsInferenceParameters,\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"mm_parameters = ManyModelsInferenceParameters(\n",
|
||||||
|
" partition_column_names=partition_column_names,\n",
|
||||||
|
" time_column_name=TIME_COLNAME,\n",
|
||||||
|
" target_column_name=TARGET_COLNAME,\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"output_file_name = \"parallel_run_step.csv\"\n",
|
||||||
|
"\n",
|
||||||
|
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
|
||||||
|
" experiment=experiment,\n",
|
||||||
|
" inference_data=test_data,\n",
|
||||||
|
" node_count=2,\n",
|
||||||
|
" process_count_per_node=2,\n",
|
||||||
|
" compute_target=compute_target,\n",
|
||||||
|
" run_invocation_timeout=300,\n",
|
||||||
|
" output_datastore=output_inference_data_ds,\n",
|
||||||
|
" train_run_id=training_run.id,\n",
|
||||||
|
" train_experiment_name=training_run.experiment.name,\n",
|
||||||
|
" inference_pipeline_parameters=mm_parameters,\n",
|
||||||
|
" append_row_file_name=output_file_name,\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.pipeline.core import Pipeline\n",
|
||||||
|
"\n",
|
||||||
|
"inference_pipeline = Pipeline(ws, steps=inference_steps)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"inference_run = experiment.submit(inference_pipeline)\n",
|
||||||
|
"inference_run.wait_for_completion(show_output=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 5.0 Retrieve results and calculate metrics\n",
|
||||||
|
"\n",
|
||||||
|
"The pipeline returns one file with the predictions for each times series ID and outputs the result to the forecasting_output Blob container. The details of the blob container is listed in 'forecasting_output.txt' under Outputs+logs. \n",
|
||||||
|
"\n",
|
||||||
|
"The next code snippet does the following:\n",
|
||||||
|
"1. Downloads the contents of the output folder that is passed in the parallel run step \n",
|
||||||
|
"2. Reads the parallel_run_step.txt file that has the predictions as pandas dataframe \n",
|
||||||
|
"3. Saves the table in csv format and \n",
|
||||||
|
"4. Displays the top 10 rows of the predictions"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
|
||||||
|
"\n",
|
||||||
|
"PREDICTION_COLNAME = \"Predictions\"\n",
|
||||||
|
"forecasting_results_name = \"forecasting_results\"\n",
|
||||||
|
"forecasting_output_name = \"many_models_inference_output\"\n",
|
||||||
|
"forecast_file = get_output_from_mm_pipeline(\n",
|
||||||
|
" inference_run, forecasting_results_name, forecasting_output_name, output_file_name\n",
|
||||||
|
")\n",
|
||||||
|
"df = pd.read_csv(forecast_file, parse_dates=[0])\n",
|
||||||
|
"print(\n",
|
||||||
|
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
|
||||||
|
")\n",
|
||||||
|
"# Save the csv file to read it in the next step.\n",
|
||||||
|
"df.rename(\n",
|
||||||
|
" columns={TARGET_COLNAME: \"actual_level\", PREDICTION_COLNAME: \"predicted_level\"},\n",
|
||||||
|
" inplace=True,\n",
|
||||||
|
")\n",
|
||||||
|
"df.to_csv(os.path.join(forecasting_results_name, \"forecast.csv\"), index=False)\n",
|
||||||
|
"df.head(10)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## View metrics\n",
|
||||||
|
"We will read in the obtained results and run the helper script, which will generate metrics and create the plots of predicted versus actual values."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from assets.score import calculate_scores_and_build_plots\n",
|
||||||
|
"\n",
|
||||||
|
"backtesting_results = \"backtesting_mm_results\"\n",
|
||||||
|
"os.makedirs(backtesting_results, exist_ok=True)\n",
|
||||||
|
"calculate_scores_and_build_plots(\n",
|
||||||
|
" forecasting_results_name,\n",
|
||||||
|
" backtesting_results,\n",
|
||||||
|
" automl_settings.as_serializable_dict(),\n",
|
||||||
|
")\n",
|
||||||
|
"pd.DataFrame({\"File\": os.listdir(backtesting_results)})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The directory contains a set of files with results:\n",
|
||||||
|
"- forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix\n",
|
||||||
|
"- scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series ids, which are marked as \"all_sets\"\n",
|
||||||
|
"- plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and, eash time series is saved as separate plot.\n",
|
||||||
|
"\n",
|
||||||
|
"For demonstration purposes we will display the table of metrics for one of the time series with ID \"ts0\". We will create the utility function, which will build the table with metrics."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def get_metrics_for_ts(all_metrics, ts):\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" Get the metrics for the time series with ID ts and return it as pandas data frame.\n",
|
||||||
|
"\n",
|
||||||
|
" :param all_metrics: The table with all the metrics.\n",
|
||||||
|
" :param ts: The ID of a time series of interest.\n",
|
||||||
|
" :return: The pandas DataFrame with metrics for one time series.\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" results_df = None\n",
|
||||||
|
" for ts_id, one_series in all_metrics.groupby(\"time_series_id\"):\n",
|
||||||
|
" if not ts_id.startswith(ts):\n",
|
||||||
|
" continue\n",
|
||||||
|
" iteration = ts_id.split(\"|\")[-1]\n",
|
||||||
|
" df = one_series[[\"metric_name\", \"metric\"]]\n",
|
||||||
|
" df.rename({\"metric\": iteration}, axis=1, inplace=True)\n",
|
||||||
|
" df.set_index(\"metric_name\", inplace=True)\n",
|
||||||
|
" if results_df is None:\n",
|
||||||
|
" results_df = df\n",
|
||||||
|
" else:\n",
|
||||||
|
" results_df = results_df.merge(\n",
|
||||||
|
" df, how=\"inner\", left_index=True, right_index=True\n",
|
||||||
|
" )\n",
|
||||||
|
" results_df.sort_index(axis=1, inplace=True)\n",
|
||||||
|
" return results_df\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"metrics_df = pd.read_csv(os.path.join(backtesting_results, \"scores.csv\"))\n",
|
||||||
|
"ts = \"ts_A\"\n",
|
||||||
|
"get_metrics_for_ts(metrics_df, ts)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Forecast vs actuals plots."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from IPython.display import IFrame\n",
|
||||||
|
"\n",
|
||||||
|
"IFrame(\"./backtesting_mm_results/plots_fcst_vs_actual.pdf\", width=800, height=300)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "jialiu"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"categories": [
|
||||||
|
"how-to-use-azureml",
|
||||||
|
"automated-machine-learning"
|
||||||
|
],
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.8 - AzureML",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python38-azureml"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.8.5"
|
||||||
|
},
|
||||||
|
"vscode": {
|
||||||
|
"interpreter": {
|
||||||
|
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 4
|
||||||
|
}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: auto-ml-forecasting-backtest-many-models
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
|
After Width: | Height: | Size: 22 KiB |
@@ -0,0 +1,45 @@
|
|||||||
|
import argparse
import os

import pandas as pd

import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru

from azureml.core import Run
from azureml.core.dataset import Dataset

# Parse the arguments: map of destination name -> command-line flag.
args = {
    "step_size": "--step-size",
    "step_number": "--step-number",
    "time_column_name": "--time-column-name",
    "time_series_id_column_names": "--time-series-id-column-names",
    "out_dir": "--output-dir",
}
parser = argparse.ArgumentParser("Parsing input arguments.")
for argname, arg in args.items():
    parser.add_argument(arg, dest=argname, required=True)
parsed_args, _ = parser.parse_known_args()
step_number = int(parsed_args.step_number)
step_size = int(parsed_args.step_size)
# Create the working directory to store the temporary csv files.
working_dir = parsed_args.out_dir
os.makedirs(working_dir, exist_ok=True)
# Set input and output.
script_run = Run.get_context()
input_dataset = script_run.input_datasets["training_data"]
X_train = input_dataset.to_pandas_dataframe()
# Split the data: backtest iteration i drops the last step_size * i rows of
# each (sorted) series, producing progressively shorter training sets.
if parsed_args.time_series_id_column_names:
    # Group and sort each series ONCE; the original code redid the groupby
    # and per-series sort on every iteration of the step loop.
    sorted_series = [
        one_series.sort_values(by=[parsed_args.time_column_name], inplace=False)
        for _, one_series in X_train.groupby(
            [parsed_args.time_series_id_column_names]
        )
    ]
    for i in range(step_number):
        file_name = os.path.join(working_dir, "backtest_{}.csv".format(i))
        dfs = [s.iloc[: len(s) - step_size * i] for s in sorted_series]
        pd.concat(dfs, sort=False, ignore_index=True).to_csv(file_name, index=False)
else:
    # Single series: sort once (hoisted out of the loop), then slice per step.
    X_train.sort_values(by=[parsed_args.time_column_name], inplace=True)
    for i in range(step_number):
        file_name = os.path.join(working_dir, "backtest_{}.csv".format(i))
        X_train.iloc[: len(X_train) - step_size * i].to_csv(file_name, index=False)
|
||||||
@@ -0,0 +1,178 @@
|
|||||||
|
# ---------------------------------------------------------
|
||||||
|
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||||
|
# ---------------------------------------------------------
|
||||||
|
"""The batch script needed for back testing of models using PRS."""
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import pickle
|
||||||
|
import re
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
|
|
||||||
|
from azureml.core.experiment import Experiment
|
||||||
|
from azureml.core.model import Model
|
||||||
|
from azureml.core.run import Run
|
||||||
|
from azureml.automl.core.shared import constants
|
||||||
|
from azureml.automl.runtime.shared.score import scoring
|
||||||
|
from azureml.train.automl import AutoMLConfig
|
||||||
|
|
||||||
|
# Characters not allowed in output file names: colons and whitespace from
# timestamp strings are replaced before building the per-iteration csv name.
RE_INVALID_SYMBOLS = re.compile(r"[:\s]")

# Module-level state populated by init() and read by run()/run_backtest().
model_name = None  # Name of a registered model to re-fit; None triggers a full AutoML run.
target_column_name = None  # Label column name, taken from automl_settings.
current_step_run = None  # Cached Run context, created lazily by get_run().
output_dir = None  # Directory where per-iteration forecast csv files are written.

logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def _get_automl_settings():
    """Load the AutoML settings shipped next to this script.

    :return: The contents of ``automl_settings.json`` as a dict.
    """
    settings_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
    )
    with open(settings_path) as json_file:
        return json.load(json_file)
|
||||||
|
|
||||||
|
|
||||||
|
def init():
    """Initialize the PRS worker before any mini batch is processed.

    Parses the command-line arguments and populates the module-level state
    used by run()/run_backtest(): the model name and uid, the target column,
    the output directory, the AutoML settings and optional forecast quantiles.
    """
    global model_name
    global target_column_name
    global output_dir
    global automl_settings
    global model_uid
    global forecast_quantiles

    logger.info("Initialization of the run.")
    arg_parser = argparse.ArgumentParser("Parsing input arguments.")
    arg_parser.add_argument("--output-dir", dest="out", required=True)
    arg_parser.add_argument("--model-name", dest="model", default=None)
    arg_parser.add_argument("--model-uid", dest="model_uid", default=None)
    arg_parser.add_argument(
        "--forecast_quantiles",
        nargs="*",
        type=float,
        help="forecast quantiles list",
        default=None,
    )
    known_args, _ = arg_parser.parse_known_args()

    # The settings file is packaged alongside this script.
    automl_settings = _get_automl_settings()
    model_name = known_args.model
    model_uid = known_args.model_uid
    forecast_quantiles = known_args.forecast_quantiles
    target_column_name = automl_settings.get("label_column_name")
    output_dir = known_args.out
    os.makedirs(output_dir, exist_ok=True)
    # Relax package version checks inside the AutoML child runs.
    os.environ["AUTOML_IGNORE_PACKAGE_VERSION_INCOMPATIBILITIES".lower()] = "True"
|
||||||
|
|
||||||
|
|
||||||
|
def get_run():
    """Lazily fetch and cache the current step's Run context."""
    global current_step_run
    if current_step_run is not None:
        return current_step_run
    current_step_run = Run.get_context()
    return current_step_run
|
||||||
|
|
||||||
|
|
||||||
|
def run_backtest(data_input_name: str, file_name: str, experiment: Experiment):
    """Train (or re-fit) the model on one backtest fold and forecast its horizon.

    :param data_input_name: Path to the csv file holding one backtest
        iteration of training + test data.
    :param file_name: Path to a pickled model to re-fit; if falsy, a full
        AutoML child run is submitted and its best model is registered.
    :param experiment: The experiment; its name is used to tag the registered model.
    :return: Data frame with predictions, actuals and the iteration identifier.
    """
    data_input = pd.read_csv(
        data_input_name,
        parse_dates=[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]],
    )
    print(data_input.head())
    if not automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES):
        # There are no grains: the data set is a single time series, so the
        # last max_horizon rows form the test set.
        data_input.sort_values(
            [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]], inplace=True
        )
        X_train = data_input.iloc[: -automl_settings["max_horizon"]]
        y_train = X_train.pop(target_column_name).values
        X_test = data_input.iloc[-automl_settings["max_horizon"] :]
        y_test = X_test.pop(target_column_name).values
    else:
        # The data contain grains: split each series on its own horizon.
        dfs_train = []
        dfs_test = []
        for _, one_series in data_input.groupby(
            automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
        ):
            one_series.sort_values(
                [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]], inplace=True
            )
            dfs_train.append(one_series.iloc[: -automl_settings["max_horizon"]])
            dfs_test.append(one_series.iloc[-automl_settings["max_horizon"] :])
        X_train = pd.concat(dfs_train, sort=False, ignore_index=True)
        y_train = X_train.pop(target_column_name).values
        X_test = pd.concat(dfs_test, sort=False, ignore_index=True)
        y_test = X_test.pop(target_column_name).values

    # Used both to tag the fold and to build a filesystem-safe output name.
    last_training_date = str(
        X_train[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]].max()
    )

    if file_name:
        # If file name is provided, we will load model and retrain it on backtest data.
        with open(file_name, "rb") as fp:
            fitted_model = pickle.load(fp)
        fitted_model.fit(X_train, y_train)
    else:
        # We will run the experiment and select the best model.
        X_train[target_column_name] = y_train
        automl_config = AutoMLConfig(training_data=X_train, **automl_settings)
        # BUGFIX: use get_run() rather than reading current_step_run directly —
        # the global is None until get_run() has been called at least once,
        # and run() already relies on get_run() for the same context.
        automl_run = get_run().submit_child(automl_config, show_output=True)
        best_run, fitted_model = automl_run.get_output()
        # As we have generated models, we need to register them for the future use.
        description = "Backtest model example"
        tags = {"last_training_date": last_training_date, "experiment": experiment.name}
        if model_uid:
            tags["model_uid"] = model_uid
        automl_run.register_model(
            model_name=best_run.properties["model_name"],
            description=description,
            tags=tags,
        )
        print(f"The model {best_run.properties['model_name']} was registered.")

    # By default we will have forecast quantiles of 0.5, which is our target
    if forecast_quantiles:
        if 0.5 not in forecast_quantiles:
            forecast_quantiles.append(0.5)
        fitted_model.quantiles = forecast_quantiles

    x_pred = fitted_model.forecast_quantiles(X_test)
    x_pred["actual_level"] = y_test
    x_pred["backtest_iteration"] = f"iteration_{last_training_date}"
    # The median (0.5 quantile) column is the point forecast.
    x_pred.rename({0.5: "predicted_level"}, axis=1, inplace=True)
    date_safe = RE_INVALID_SYMBOLS.sub("_", last_training_date)

    x_pred.to_csv(os.path.join(output_dir, f"iteration_{date_safe}.csv"), index=False)
    return x_pred
|
||||||
|
|
||||||
|
|
||||||
|
def run(input_files):
    """Run one mini batch: backtest every input file and return the combined forecasts."""
    logger.info("Running mini batch.")
    workspace = get_run().experiment.workspace
    file_name = None
    if model_name:
        registered = Model.list(workspace, name=model_name)
        latest = None
        if registered:
            # Scan for the highest model version, logging each improvement found.
            for candidate in registered:
                if latest is None or candidate.version > latest.version:
                    logger.info(
                        "Using existing model from the workspace. Model version: {}".format(
                            candidate.version
                        )
                    )
                    latest = candidate
            file_name = latest.download(exist_ok=True)

    logger.info("Running backtest.")
    results = [
        run_backtest(one_file, file_name, get_run().experiment)
        for one_file in input_files
    ]
    return pd.concat(results)
|
||||||
@@ -0,0 +1,171 @@
|
|||||||
|
from typing import Any, Dict, Optional, List
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
|
||||||
|
from matplotlib import pyplot as plt
|
||||||
|
from matplotlib.backends.backend_pdf import PdfPages
|
||||||
|
|
||||||
|
from azureml.automl.core.shared import constants
|
||||||
|
from azureml.automl.core.shared.types import GrainType
|
||||||
|
from azureml.automl.runtime.shared.score import scoring
|
||||||
|
|
||||||
|
# Well-known column names shared by the backtest output files.
GRAIN = "time_series_id"
BACKTEST_ITER = "backtest_iteration"
ACTUALS = "actual_level"
PREDICTIONS = "predicted_level"
# Sentinel series id for metrics computed over every series combined.
ALL_GRAINS = "all_sets"

# Names of the artifacts written to the output directory.
FORECASTS_FILE = "forecast.csv"
SCORES_FILE = "scores.csv"
PLOTS_FILE = "plots_fcst_vs_actual.pdf"
# Characters replaced by "_" when building file names from dates.
RE_INVALID_SYMBOLS = re.compile("[: ]")
|
||||||
|
|
||||||
|
|
||||||
|
def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
    """
    Compute the regression metrics for a single data frame.

    :param df: The data frame which contains actual_level and predicted_level columns.
    :param metrics: The list of metric names to compute.
    :return: The data frame with two columns - metric_name and metric.
    """
    score_map = scoring.score_regression(
        y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
    )
    result = pd.DataFrame(list(score_map.items()), columns=["metric_name", "metric"])
    result = result.sort_values(["metric_name"]).reset_index(drop=True)
    return result
|
||||||
|
|
||||||
|
|
||||||
|
def _format_grain_name(grain: GrainType) -> str:
|
||||||
|
"""
|
||||||
|
Convert grain name to string.
|
||||||
|
|
||||||
|
:param grain: the grain name.
|
||||||
|
:return: the string representation of the given grain.
|
||||||
|
"""
|
||||||
|
if not isinstance(grain, tuple) and not isinstance(grain, list):
|
||||||
|
return str(grain)
|
||||||
|
grain = list(map(str, grain))
|
||||||
|
return "|".join(grain)
|
||||||
|
|
||||||
|
|
||||||
|
def compute_all_metrics(
    fcst_df: pd.DataFrame,
    ts_id_colnames: Optional[List[str]] = None,
    metric_names: Optional[List[str]] = None,
):
    """
    Calculate regression metrics per time series and over all series.

    :param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'
    :param ts_id_colnames: (optional) list of grain column names
    :param metric_names: (optional) the list of metric names to return
    :return: the data frame with metric_name/metric columns and a time_series_id column;
             one group of rows per series plus an 'all_sets' group covering all data.
    """
    if not metric_names:
        # Fall back to the full scalar regression metric set.
        metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)

    if ts_id_colnames is None:
        ts_id_colnames = []

    metrics_list = []
    if ts_id_colnames:
        for grain, df in fcst_df.groupby(ts_id_colnames):
            one_grain_metrics_df = _compute_metrics(df, metric_names)
            one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
            metrics_list.append(one_grain_metrics_df)

    # Overall metrics, computed across every series and iteration.
    one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
    one_grain_metrics_df[GRAIN] = ALL_GRAINS
    metrics_list.append(one_grain_metrics_df)

    # Collect the per-series and overall tables into a single data frame.
    return pd.concat(metrics_list)
|
||||||
|
|
||||||
|
|
||||||
|
def _draw_one_plot(
    df: pd.DataFrame,
    time_column_name: str,
    grain_column_names: List[str],
    pdf: PdfPages,
) -> None:
    """
    Render the actuals-vs-forecast plot for one series/iteration into the pdf.

    :param df: The data frame with the data to build plot.
    :param time_column_name: The name of a time column.
    :param grain_column_names: The name of grain columns.
    :param pdf: The pdf backend used to render the plot.
    """
    fig, _ = plt.subplots(figsize=(20, 10))
    indexed = df.set_index(time_column_name)
    plt.plot(indexed[[ACTUALS, PREDICTIONS]])
    plt.xticks(rotation=45)
    iteration = indexed[BACKTEST_ITER].iloc[0]
    if grain_column_names:
        series_ids = [indexed[col].iloc[0] for col in grain_column_names]
        plt.title(f"Time series ID: {_format_grain_name(series_ids)} {iteration}")
    plt.legend(["actual", "forecast"])
    # NOTE(review): the figure is closed (detached from pyplot) before being
    # written to the pdf; the figure object remains saveable.
    plt.close(fig)
    pdf.savefig(fig)
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_scores_and_build_plots(
    input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
):
    """
    Combine per-iteration forecast CSVs and write forecasts, plots and metrics.

    Reads every CSV from input_dir, writes forecast.csv, plots_fcst_vs_actual.pdf
    and scores.csv into output_dir (created if missing).

    :param input_dir: The directory with CSV files produced by the backtest runs.
    :param output_dir: The directory to write the combined artifacts to.
    :param automl_settings: AutoML settings; the time and grain column names are read from it.
    """
    os.makedirs(output_dir, exist_ok=True)
    grains = automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
    time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
    # Normalize grains to a list of column names.
    # NOTE(review): when the settings already hold a list, the remove() below
    # mutates that same list inside automl_settings — confirm callers don't rely on it.
    if grains is None:
        grains = []
    if isinstance(grains, str):
        grains = [grains]
    # The iteration column is added by the backtest itself; it is not a real series id.
    while BACKTEST_ITER in grains:
        grains.remove(BACKTEST_ITER)

    # Load each CSV and split it into one frame per backtest iteration.
    dfs = []
    for fle in os.listdir(input_dir):
        file_path = os.path.join(input_dir, fle)
        if os.path.isfile(file_path) and file_path.endswith(".csv"):
            df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
            for _, iteration in df_iter.groupby(BACKTEST_ITER):
                dfs.append(iteration)
    forecast_df = pd.concat(dfs, sort=False, ignore_index=True)
    # To make sure plots are in order, sort the predictions by grain and iteration.
    ts_index = grains + [BACKTEST_ITER]
    forecast_df.sort_values(by=ts_index, inplace=True)
    # One plot per (series id, iteration) combination, all in a single pdf file.
    pdf = PdfPages(os.path.join(output_dir, PLOTS_FILE))
    for _, one_forecast in forecast_df.groupby(ts_index):
        _draw_one_plot(one_forecast, time_column_name, grains, pdf)
    pdf.close()
    forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
    # Remove np.NaN and np.inf from the prediction and actuals data
    # (the written forecast.csv above keeps the raw values).
    forecast_df.replace([np.inf, -np.inf], np.nan, inplace=True)
    forecast_df.dropna(subset=[ACTUALS, PREDICTIONS], inplace=True)
    metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
    metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Parse the two required command-line arguments.
    parser = argparse.ArgumentParser("Parsing input arguments.")
    parser.add_argument("--forecasts", dest="forecasts", required=True)
    parser.add_argument("--output-dir", dest="scores_out", required=True)
    parsed_args, _ = parser.parse_known_args()

    input_dir = parsed_args.forecasts
    output_dir = parsed_args.scores_out

    # The AutoML settings file is stored next to this script.
    settings_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
    )
    with open(settings_path) as json_file:
        automl_settings = json.load(json_file)

    calculate_scores_and_build_plots(input_dir, output_dir, automl_settings)
|
||||||
@@ -0,0 +1,729 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License.\n",
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Automated Machine Learning\n",
|
||||||
|
"_**The model backtesting**_\n",
|
||||||
|
"\n",
|
||||||
|
"## Contents\n",
|
||||||
|
"1. [Introduction](#Introduction)\n",
|
||||||
|
"2. [Setup](#Setup)\n",
|
||||||
|
"3. [Data](#Data)\n",
|
||||||
|
"4. [Prepare remote compute and data.](#prepare_remote)\n",
|
||||||
|
"5. [Create the configuration for AutoML backtesting](#train)\n",
|
||||||
|
"6. [Backtest AutoML](#backtest_automl)\n",
|
||||||
|
"7. [View metrics](#Metrics)\n",
|
||||||
|
"8. [Backtest the best model](#backtest_model)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Introduction\n",
|
||||||
|
"Model backtesting is used to evaluate a model's performance on historical data. To do that, we step back over the data set by the backtesting period several times, each time splitting the data into train and test sets. These data sets are then used for training and evaluation of the model.<br>\n",
|
||||||
|
"This notebook is intended to demonstrate backtesting on a single model, this is the best solution for small data sets with a few or one time series in it. For scenarios where we would like to choose the best AutoML model for every backtest iteration, please see [AutoML Forecasting Backtest Many Models Example](../forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb) notebook.\n",
|
||||||
|
"\n",
|
||||||
|
"This notebook demonstrates two ways of backtesting:\n",
|
||||||
|
"- AutoML backtesting: we will train separate AutoML models for historical data\n",
|
||||||
|
"- Model backtesting: from the first run we will select the best model trained on the most recent data, retrain it on the past data and evaluate."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Setup"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os\n",
|
||||||
|
"import numpy as np\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"import shutil\n",
|
||||||
|
"\n",
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core import Experiment, Model, Workspace"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"This notebook is compatible with Azure ML SDK version 1.35.1 or later."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"As part of the setup you have already created a <b>Workspace</b>."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"\n",
|
||||||
|
"output = {}\n",
|
||||||
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
|
"output[\"Workspace\"] = ws.name\n",
|
||||||
|
"output[\"SKU\"] = ws.sku\n",
|
||||||
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
|
"output[\"Location\"] = ws.location\n",
|
||||||
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
|
"outputDf.T"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Data\n",
|
||||||
|
"For the demonstration purposes we will simulate one year of daily data. To do this we need to specify the following parameters: time column name, time series ID column names and label column name. Our intention is to forecast for two weeks ahead."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"TIME_COLUMN_NAME = \"date\"\n",
|
||||||
|
"TIME_SERIES_ID_COLUMN_NAMES = \"time_series_id\"\n",
|
||||||
|
"LABEL_COLUMN_NAME = \"y\"\n",
|
||||||
|
"FORECAST_HORIZON = 14\n",
|
||||||
|
"FREQUENCY = \"D\"\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"def simulate_timeseries_data(\n",
|
||||||
|
" train_len: int,\n",
|
||||||
|
" test_len: int,\n",
|
||||||
|
" time_column_name: str,\n",
|
||||||
|
" target_column_name: str,\n",
|
||||||
|
" time_series_id_column_name: str,\n",
|
||||||
|
" time_series_number: int = 1,\n",
|
||||||
|
" freq: str = \"H\",\n",
|
||||||
|
"):\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" Return the time series of designed length.\n",
|
||||||
|
"\n",
|
||||||
|
" :param train_len: The length of training data (one series).\n",
|
||||||
|
" :type train_len: int\n",
|
||||||
|
" :param test_len: The length of testing data (one series).\n",
|
||||||
|
" :type test_len: int\n",
|
||||||
|
" :param time_column_name: The desired name of a time column.\n",
|
||||||
|
" :type time_column_name: str\n",
|
||||||
|
" :param time_series_number: The number of time series in the data set.\n",
|
||||||
|
" :type time_series_number: int\n",
|
||||||
|
" :param freq: The frequency string representing pandas offset.\n",
|
||||||
|
" see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n",
|
||||||
|
" :type freq: str\n",
|
||||||
|
" :returns: the tuple of train and test data sets.\n",
|
||||||
|
" :rtype: tuple\n",
|
||||||
|
"\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" data_train = [] # type: List[pd.DataFrame]\n",
|
||||||
|
" data_test = [] # type: List[pd.DataFrame]\n",
|
||||||
|
" data_length = train_len + test_len\n",
|
||||||
|
" for i in range(time_series_number):\n",
|
||||||
|
" X = pd.DataFrame(\n",
|
||||||
|
" {\n",
|
||||||
|
" time_column_name: pd.date_range(\n",
|
||||||
|
" start=\"2000-01-01\", periods=data_length, freq=freq\n",
|
||||||
|
" ),\n",
|
||||||
|
" target_column_name: np.arange(data_length).astype(float)\n",
|
||||||
|
" + np.random.rand(data_length)\n",
|
||||||
|
" + i * 5,\n",
|
||||||
|
" \"ext_predictor\": np.asarray(range(42, 42 + data_length)),\n",
|
||||||
|
" time_series_id_column_name: np.repeat(\"ts{}\".format(i), data_length),\n",
|
||||||
|
" }\n",
|
||||||
|
" )\n",
|
||||||
|
" data_train.append(X[:train_len])\n",
|
||||||
|
" data_test.append(X[train_len:])\n",
|
||||||
|
" train = pd.concat(data_train)\n",
|
||||||
|
" label_train = train.pop(target_column_name).values\n",
|
||||||
|
" test = pd.concat(data_test)\n",
|
||||||
|
" label_test = test.pop(target_column_name).values\n",
|
||||||
|
" return train, label_train, test, label_test\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"n_test_periods = FORECAST_HORIZON\n",
|
||||||
|
"n_train_periods = 365\n",
|
||||||
|
"X_train, y_train, X_test, y_test = simulate_timeseries_data(\n",
|
||||||
|
" train_len=n_train_periods,\n",
|
||||||
|
" test_len=n_test_periods,\n",
|
||||||
|
" time_column_name=TIME_COLUMN_NAME,\n",
|
||||||
|
" target_column_name=LABEL_COLUMN_NAME,\n",
|
||||||
|
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAMES,\n",
|
||||||
|
" time_series_number=2,\n",
|
||||||
|
" freq=FREQUENCY,\n",
|
||||||
|
")\n",
|
||||||
|
"X_train[LABEL_COLUMN_NAME] = y_train"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Let's see what the training data looks like."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"X_train.tail()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Prepare remote compute and data. <a id=\"prepare_remote\"></a>\n",
|
||||||
|
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace), is paired with the storage account, which contains the default data store. We will use it to upload the artificial data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||||
|
"\n",
|
||||||
|
"ds = ws.get_default_datastore()\n",
|
||||||
|
"# Upload saved data to the default data store.\n",
|
||||||
|
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||||
|
" X_train, target=(ds, \"data\"), name=\"data_backtest\"\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"You will need to create a compute target for backtesting. In this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute), you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
|
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||||
|
"\n",
|
||||||
|
"# Choose a name for your CPU cluster\n",
|
||||||
|
"amlcompute_cluster_name = \"backtest-cluster\"\n",
|
||||||
|
"\n",
|
||||||
|
"# Verify that cluster does not exist already\n",
|
||||||
|
"try:\n",
|
||||||
|
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||||
|
" print(\"Found existing cluster, use it.\")\n",
|
||||||
|
"except ComputeTargetException:\n",
|
||||||
|
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||||
|
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
|
||||||
|
" )\n",
|
||||||
|
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||||
|
"\n",
|
||||||
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Create the configuration for AutoML backtesting <a id=\"train\"></a>\n",
|
||||||
|
"\n",
|
||||||
|
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
|
||||||
|
"\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **task** | forecasting |\n",
|
||||||
|
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>normalized_root_mean_squared_error</i><br><i>normalized_mean_absolute_error</i> |\n",
|
||||||
|
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||||
|
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||||
|
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||||
|
"| **label_column_name** | The name of the label column. |\n",
|
||||||
|
"| **max_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
||||||
|
"| **n_cross_validations** | Number of cross validation splits. The default value is \"auto\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
||||||
|
"|**cv_step_size**|Number of periods between two consecutive cross-validation folds. The default value is \"auto\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value.\n",
|
||||||
|
"| **time_column_name** | The name of your time column. |\n",
|
||||||
|
"| **grain_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"automl_settings = {\n",
|
||||||
|
" \"task\": \"forecasting\",\n",
|
||||||
|
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
|
||||||
|
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
|
||||||
|
" \"iterations\": 15,\n",
|
||||||
|
" \"experiment_timeout_hours\": 1, # This also needs to be changed based on the dataset. For larger data set this number needs to be bigger.\n",
|
||||||
|
" \"label_column_name\": LABEL_COLUMN_NAME,\n",
|
||||||
|
" \"n_cross_validations\": \"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||||
|
" \"cv_step_size\": \"auto\",\n",
|
||||||
|
" \"time_column_name\": TIME_COLUMN_NAME,\n",
|
||||||
|
" \"max_horizon\": FORECAST_HORIZON,\n",
|
||||||
|
" \"track_child_runs\": False,\n",
|
||||||
|
" \"grain_column_names\": TIME_SERIES_ID_COLUMN_NAMES,\n",
|
||||||
|
"}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Backtest AutoML <a id=\"backtest_automl\"></a>\n",
|
||||||
|
"First we set backtesting parameters: we will step back by 30 days and will make 5 such steps; for each step we will forecast for next two weeks."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# The number of periods to step back on each backtest iteration.\n",
|
||||||
|
"BACKTESTING_PERIOD = 30\n",
|
||||||
|
"# The number of times we will back test the model.\n",
|
||||||
|
"NUMBER_OF_BACKTESTS = 5"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"To train AutoML on backtesting folds we will use the [Azure Machine Learning pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/concept-ml-pipelines). It will generate backtest folds, then train model for each of them and calculate the accuracy metrics. To run pipeline, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve (here, it is a forecasting), while a Run corresponds to a specific approach to the problem."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from uuid import uuid1\n",
|
||||||
|
"\n",
|
||||||
|
"from pipeline_helper import get_backtest_pipeline\n",
|
||||||
|
"\n",
|
||||||
|
"pipeline_exp = Experiment(ws, \"automl-backtesting\")\n",
|
||||||
|
"\n",
|
||||||
|
"# We will create the unique identifier to mark our models.\n",
|
||||||
|
"model_uid = str(uuid1())\n",
|
||||||
|
"\n",
|
||||||
|
"pipeline = get_backtest_pipeline(\n",
|
||||||
|
" experiment=pipeline_exp,\n",
|
||||||
|
" dataset=train_data,\n",
|
||||||
|
" # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.\n",
|
||||||
|
" process_per_node=2,\n",
|
||||||
|
" # The maximum number of nodes for our compute is 6.\n",
|
||||||
|
" node_count=6,\n",
|
||||||
|
" compute_target=compute_target,\n",
|
||||||
|
" automl_settings=automl_settings,\n",
|
||||||
|
" step_size=BACKTESTING_PERIOD,\n",
|
||||||
|
" step_number=NUMBER_OF_BACKTESTS,\n",
|
||||||
|
" model_uid=model_uid,\n",
|
||||||
|
" forecast_quantiles=[0.025, 0.975], # Optional\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Run the pipeline and wait for results."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"pipeline_run = pipeline_exp.submit(pipeline)\n",
|
||||||
|
"pipeline_run.wait_for_completion(show_output=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"After the run is complete, we can download the results. "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"metrics_output = pipeline_run.get_pipeline_output(\"results\")\n",
|
||||||
|
"metrics_output.download(\"backtest_metrics\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## View metrics<a id=\"Metrics\"></a>\n",
|
||||||
|
"To distinguish these metrics from the model backtest, which we will obtain in the next section, we will move the directory with metrics out of the backtest_metrics and will remove the parent folder. We will create the utility function for that."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def copy_scoring_directory(new_name):\n",
|
||||||
|
" scores_path = os.path.join(\"backtest_metrics\", \"azureml\")\n",
|
||||||
|
" directory_list = [os.path.join(scores_path, d) for d in os.listdir(scores_path)]\n",
|
||||||
|
" latest_file = max(directory_list, key=os.path.getctime)\n",
|
||||||
|
" print(\n",
|
||||||
|
" f\"The output directory {latest_file} was created on {pd.Timestamp(os.path.getctime(latest_file), unit='s')} GMT.\"\n",
|
||||||
|
" )\n",
|
||||||
|
" shutil.move(os.path.join(latest_file, \"results\"), new_name)\n",
|
||||||
|
" shutil.rmtree(\"backtest_metrics\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Move the directory and list its contents."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"copy_scoring_directory(\"automl_backtest\")\n",
|
||||||
|
"pd.DataFrame({\"File\": os.listdir(\"automl_backtest\")})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The directory contains a set of files with results:\n",
|
||||||
|
"- forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix\n",
|
||||||
|
"- scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series id are marked as \"all_sets\"\n",
|
||||||
|
"- plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and time series.\n",
|
||||||
|
"\n",
|
||||||
|
"For demonstration purposes we will display the table of metrics for one of the time series with ID \"ts0\". Again, we will create the utility function, which will be reused in model backtesting."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def get_metrics_for_ts(all_metrics, ts):\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" Get the metrics for the time series with ID ts and return it as pandas data frame.\n",
|
||||||
|
"\n",
|
||||||
|
" :param all_metrics: The table with all the metrics.\n",
|
||||||
|
" :param ts: The ID of a time series of interest.\n",
|
||||||
|
" :return: The pandas DataFrame with metrics for one time series.\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" results_df = None\n",
|
||||||
|
" for ts_id, one_series in all_metrics.groupby(\"time_series_id\"):\n",
|
||||||
|
" if not ts_id.startswith(ts):\n",
|
||||||
|
" continue\n",
|
||||||
|
" iteration = ts_id.split(\"|\")[-1]\n",
|
||||||
|
" df = one_series[[\"metric_name\", \"metric\"]]\n",
|
||||||
|
" df.rename({\"metric\": iteration}, axis=1, inplace=True)\n",
|
||||||
|
" df.set_index(\"metric_name\", inplace=True)\n",
|
||||||
|
" if results_df is None:\n",
|
||||||
|
" results_df = df\n",
|
||||||
|
" else:\n",
|
||||||
|
" results_df = results_df.merge(\n",
|
||||||
|
" df, how=\"inner\", left_index=True, right_index=True\n",
|
||||||
|
" )\n",
|
||||||
|
" results_df.sort_index(axis=1, inplace=True)\n",
|
||||||
|
" return results_df\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"metrics_df = pd.read_csv(os.path.join(\"automl_backtest\", \"scores.csv\"))\n",
|
||||||
|
"ts_id = \"ts0\"\n",
|
||||||
|
"get_metrics_for_ts(metrics_df, ts_id)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Forecast vs actuals plots."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from IPython.display import IFrame\n",
|
||||||
|
"\n",
|
||||||
|
"IFrame(\"./automl_backtest/plots_fcst_vs_actual.pdf\", width=800, height=300)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# <font color='blue'>Backtest the best model</font> <a id=\"backtest_model\"></a>\n",
|
||||||
|
"\n",
|
||||||
|
    "For model backtesting we will use the same parameters we used to backtest AutoML. All the models we have obtained in the previous run were registered in our workspace. To identify the model, each was assigned a tag with the last training date."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"model_list = Model.list(ws, tags=[[\"experiment\", \"automl-backtesting\"]])\n",
|
||||||
|
"model_data = {\"name\": [], \"last_training_date\": []}\n",
|
||||||
|
"for model in model_list:\n",
|
||||||
|
" if (\n",
|
||||||
|
" \"last_training_date\" not in model.tags\n",
|
||||||
|
" or \"model_uid\" not in model.tags\n",
|
||||||
|
" or model.tags[\"model_uid\"] != model_uid\n",
|
||||||
|
" ):\n",
|
||||||
|
" continue\n",
|
||||||
|
" model_data[\"name\"].append(model.name)\n",
|
||||||
|
" model_data[\"last_training_date\"].append(\n",
|
||||||
|
" pd.Timestamp(model.tags[\"last_training_date\"])\n",
|
||||||
|
" )\n",
|
||||||
|
"df_models = pd.DataFrame(model_data)\n",
|
||||||
|
"df_models.sort_values([\"last_training_date\"], inplace=True)\n",
|
||||||
|
"df_models.reset_index(inplace=True, drop=True)\n",
|
||||||
|
"df_models"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
    "We will backtest the model trained on the most recent data."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"model_name = df_models[\"name\"].iloc[-1]\n",
|
||||||
|
"model_name"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Retrain the models.\n",
|
||||||
|
"Assemble the pipeline, which will retrain the best model from AutoML run on historical data."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"pipeline_exp = Experiment(ws, \"model-backtesting\")\n",
|
||||||
|
"\n",
|
||||||
|
"pipeline = get_backtest_pipeline(\n",
|
||||||
|
" experiment=pipeline_exp,\n",
|
||||||
|
" dataset=train_data,\n",
|
||||||
|
" # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.\n",
|
||||||
|
" process_per_node=2,\n",
|
||||||
|
" # The maximum number of nodes for our compute is 6.\n",
|
||||||
|
" node_count=6,\n",
|
||||||
|
" compute_target=compute_target,\n",
|
||||||
|
" automl_settings=automl_settings,\n",
|
||||||
|
" step_size=BACKTESTING_PERIOD,\n",
|
||||||
|
" step_number=NUMBER_OF_BACKTESTS,\n",
|
||||||
|
" model_name=model_name,\n",
|
||||||
|
" forecast_quantiles=[0.025, 0.975],\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Launch the backtesting pipeline."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"pipeline_run = pipeline_exp.submit(pipeline)\n",
|
||||||
|
"pipeline_run.wait_for_completion(show_output=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The metrics are stored in the pipeline output named \"score\". The next code will download the table with metrics."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"metrics_output = pipeline_run.get_pipeline_output(\"results\")\n",
|
||||||
|
"metrics_output.download(\"backtest_metrics\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Again, we will copy the data files from the downloaded directory, but in this case we will call the folder \"model_backtest\"; it will contain the same files as the one for AutoML backtesting."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"copy_scoring_directory(\"model_backtest\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Finally, we will display the metrics."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"model_metrics_df = pd.read_csv(os.path.join(\"model_backtest\", \"scores.csv\"))\n",
|
||||||
|
"get_metrics_for_ts(model_metrics_df, ts_id)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Forecast vs actuals plots."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from IPython.display import IFrame\n",
|
||||||
|
"\n",
|
||||||
|
"IFrame(\"./model_backtest/plots_fcst_vs_actual.pdf\", width=800, height=300)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "jialiu"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"category": "tutorial",
|
||||||
|
"compute": [
|
||||||
|
"Remote"
|
||||||
|
],
|
||||||
|
"datasets": [
|
||||||
|
"None"
|
||||||
|
],
|
||||||
|
"deployment": [
|
||||||
|
"None"
|
||||||
|
],
|
||||||
|
"exclude_from_index": false,
|
||||||
|
"framework": [
|
||||||
|
"Azure ML AutoML"
|
||||||
|
],
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.8 - AzureML",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python38-azureml"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.8.5"
|
||||||
|
},
|
||||||
|
"vscode": {
|
||||||
|
"interpreter": {
|
||||||
|
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 4
|
||||||
|
}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: auto-ml-forecasting-backtest-single-model
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -0,0 +1,169 @@
|
|||||||
|
from typing import Any, Dict, Optional
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru
|
||||||
|
|
||||||
|
from azureml._restclient.jasmine_client import JasmineClient
|
||||||
|
from azureml.contrib.automl.pipeline.steps import utilities
|
||||||
|
from azureml.core import RunConfiguration
|
||||||
|
from azureml.core.compute import ComputeTarget
|
||||||
|
from azureml.core.experiment import Experiment
|
||||||
|
from azureml.data import LinkTabularOutputDatasetConfig, TabularDataset
|
||||||
|
from azureml.pipeline.core import Pipeline, PipelineData, PipelineParameter
|
||||||
|
from azureml.pipeline.steps import ParallelRunConfig, ParallelRunStep, PythonScriptStep
|
||||||
|
from azureml.train.automl.constants import Scenarios
|
||||||
|
from azureml.data.dataset_consumption_config import DatasetConsumptionConfig
|
||||||
|
|
||||||
|
|
||||||
|
PROJECT_FOLDER = "assets"
|
||||||
|
SETTINGS_FILE = "automl_settings.json"
|
||||||
|
|
||||||
|
|
||||||
|
def get_backtest_pipeline(
    experiment: Experiment,
    dataset: TabularDataset,
    process_per_node: int,
    node_count: int,
    compute_target: ComputeTarget,
    automl_settings: Dict[str, Any],
    step_size: int,
    step_number: int,
    model_name: Optional[str] = None,
    model_uid: Optional[str] = None,
    forecast_quantiles: Optional[list] = None,
) -> Pipeline:
    """
    Build the three-step backtesting pipeline: split data, retrain/forecast, collect scores.

    :param experiment: The experiment used to run the pipeline.
    :param dataset: Tabular data set to be used for model training.
    :param process_per_node: The number of processes per node. Generally it should be the number of cores
                             on the node divided by two.
    :param node_count: The number of nodes to be used.
    :param compute_target: The compute target to be used to run the pipeline.
    :param automl_settings: The dictionary with automl settings.
    :param step_size: The number of periods to step back in backtesting.
    :param step_number: The number of backtesting iterations.
    :param model_name: The name of a model to be back tested. If not provided, a full
                       AutoML run is backtested instead.
    :param model_uid: The uid to mark models from this run of the experiment.
    :param forecast_quantiles: The forecast quantiles that are required in the inference.
    :return: The pipeline to be used for model retraining.
             **Note:** The output will be uploaded in the pipeline output
             called 'results'.
    """
    # The curated environment is resolved through the Jasmine service so that the
    # AutoML dependencies match the workspace/compute SKU.
    jasmine_client = JasmineClient(
        service_context=experiment.workspace.service_context,
        experiment_name=experiment.name,
        experiment_id=experiment.id,
    )
    env = jasmine_client.get_curated_environment(
        scenario=Scenarios.AUTOML,
        enable_dnn=False,
        enable_gpu=False,
        compute=compute_target,
        compute_sku=experiment.workspace.compute_targets.get(
            compute_target.name
        ).vm_size,
    )
    # Final pipeline output; the notebook downloads it via get_pipeline_output("results").
    data_results = PipelineData(
        name="results", datastore=None, pipeline_output_name="results"
    )
    ############################################################
    # Split the data set using python script.
    ############################################################
    run_config = RunConfiguration()
    run_config.docker.use_docker = True
    run_config.environment = env

    utilities.set_environment_variables_for_run(run_config)

    split_data = PipelineData(name="split_data_output", datastore=None).as_dataset()
    split_step = PythonScriptStep(
        name="split_data_for_backtest",
        script_name="data_split.py",
        inputs=[dataset.as_named_input("training_data")],
        outputs=[split_data],
        source_directory=PROJECT_FOLDER,
        arguments=[
            "--step-size",
            step_size,
            "--step-number",
            step_number,
            "--time-column-name",
            automl_settings.get("time_column_name"),
            "--time-series-id-column-names",
            automl_settings.get("grain_column_names"),
            "--output-dir",
            split_data,
        ],
        runconfig=run_config,
        compute_target=compute_target,
        allow_reuse=False,
    )
    ############################################################
    # We will do the backtest using the parallel run step.
    ############################################################
    # The retrain script reads the AutoML settings from a JSON file shipped
    # alongside it in the snapshot folder.
    settings_path = os.path.join(PROJECT_FOLDER, SETTINGS_FILE)
    hru.dump_object_to_json(automl_settings, settings_path)
    # One backtest iteration (folder) per mini batch.
    mini_batch_size = PipelineParameter(name="batch_size_param", default_value=str(1))
    back_test_config = ParallelRunConfig(
        source_directory=PROJECT_FOLDER,
        entry_script="retrain_models.py",
        mini_batch_size=mini_batch_size,
        # error_threshold=-1 means "never fail the step on item errors".
        error_threshold=-1,
        output_action="append_row",
        append_row_file_name="outputs.txt",
        compute_target=compute_target,
        environment=env,
        process_count_per_node=process_per_node,
        run_invocation_timeout=3600,
        node_count=node_count,
    )
    utilities.set_environment_variables_for_run(back_test_config)
    forecasts = PipelineData(name="forecasts", datastore=None)
    if model_name:
        parallel_step_name = "{}-backtest".format(model_name.replace("_", "-"))
    else:
        parallel_step_name = "AutoML-backtest"

    prs_args = [
        "--target_column_name",
        automl_settings.get("label_column_name"),
        "--output-dir",
        forecasts,
    ]
    if model_name is not None:
        prs_args.append("--model-name")
        prs_args.append(model_name)
    if model_uid is not None:
        prs_args.append("--model-uid")
        prs_args.append(model_uid)
    if forecast_quantiles:
        prs_args.append("--forecast_quantiles")
        prs_args.extend(forecast_quantiles)
    backtest_prs = ParallelRunStep(
        name=parallel_step_name,
        parallel_run_config=back_test_config,
        arguments=prs_args,
        inputs=[split_data],
        output=forecasts,
        allow_reuse=False,
    )
    ############################################################
    # Then we collect the output and return it as scores output.
    ############################################################
    collection_step = PythonScriptStep(
        name="score",
        script_name="score.py",
        inputs=[forecasts.as_mount()],
        outputs=[data_results],
        source_directory=PROJECT_FOLDER,
        arguments=["--forecasts", forecasts, "--output-dir", data_results],
        runconfig=run_config,
        compute_target=compute_target,
        allow_reuse=False,
    )
    # Build and return the pipeline.
    return Pipeline(
        workspace=experiment.workspace,
        steps=[split_step, backtest_prs, collection_step],
    )
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
DATE,grain,BeerProduction
|
|
||||||
2017-01-01,grain,9049
|
|
||||||
2017-02-01,grain,10458
|
|
||||||
2017-03-01,grain,12489
|
|
||||||
2017-04-01,grain,11499
|
|
||||||
2017-05-01,grain,13553
|
|
||||||
2017-06-01,grain,14740
|
|
||||||
2017-07-01,grain,11424
|
|
||||||
2017-08-01,grain,13412
|
|
||||||
2017-09-01,grain,11917
|
|
||||||
2017-10-01,grain,12721
|
|
||||||
2017-11-01,grain,13272
|
|
||||||
2017-12-01,grain,14278
|
|
||||||
2018-01-01,grain,9572
|
|
||||||
2018-02-01,grain,10423
|
|
||||||
2018-03-01,grain,12667
|
|
||||||
2018-04-01,grain,11904
|
|
||||||
2018-05-01,grain,14120
|
|
||||||
2018-06-01,grain,14565
|
|
||||||
2018-07-01,grain,12622
|
|
||||||
|
@@ -1,301 +0,0 @@
|
|||||||
DATE,grain,BeerProduction
|
|
||||||
1992-01-01,grain,3459
|
|
||||||
1992-02-01,grain,3458
|
|
||||||
1992-03-01,grain,4002
|
|
||||||
1992-04-01,grain,4564
|
|
||||||
1992-05-01,grain,4221
|
|
||||||
1992-06-01,grain,4529
|
|
||||||
1992-07-01,grain,4466
|
|
||||||
1992-08-01,grain,4137
|
|
||||||
1992-09-01,grain,4126
|
|
||||||
1992-10-01,grain,4259
|
|
||||||
1992-11-01,grain,4240
|
|
||||||
1992-12-01,grain,4936
|
|
||||||
1993-01-01,grain,3031
|
|
||||||
1993-02-01,grain,3261
|
|
||||||
1993-03-01,grain,4160
|
|
||||||
1993-04-01,grain,4377
|
|
||||||
1993-05-01,grain,4307
|
|
||||||
1993-06-01,grain,4696
|
|
||||||
1993-07-01,grain,4458
|
|
||||||
1993-08-01,grain,4457
|
|
||||||
1993-09-01,grain,4364
|
|
||||||
1993-10-01,grain,4236
|
|
||||||
1993-11-01,grain,4500
|
|
||||||
1993-12-01,grain,4974
|
|
||||||
1994-01-01,grain,3075
|
|
||||||
1994-02-01,grain,3377
|
|
||||||
1994-03-01,grain,4443
|
|
||||||
1994-04-01,grain,4261
|
|
||||||
1994-05-01,grain,4460
|
|
||||||
1994-06-01,grain,4985
|
|
||||||
1994-07-01,grain,4324
|
|
||||||
1994-08-01,grain,4719
|
|
||||||
1994-09-01,grain,4374
|
|
||||||
1994-10-01,grain,4248
|
|
||||||
1994-11-01,grain,4784
|
|
||||||
1994-12-01,grain,4971
|
|
||||||
1995-01-01,grain,3370
|
|
||||||
1995-02-01,grain,3484
|
|
||||||
1995-03-01,grain,4269
|
|
||||||
1995-04-01,grain,3994
|
|
||||||
1995-05-01,grain,4715
|
|
||||||
1995-06-01,grain,4974
|
|
||||||
1995-07-01,grain,4223
|
|
||||||
1995-08-01,grain,5000
|
|
||||||
1995-09-01,grain,4235
|
|
||||||
1995-10-01,grain,4554
|
|
||||||
1995-11-01,grain,4851
|
|
||||||
1995-12-01,grain,4826
|
|
||||||
1996-01-01,grain,3699
|
|
||||||
1996-02-01,grain,3983
|
|
||||||
1996-03-01,grain,4262
|
|
||||||
1996-04-01,grain,4619
|
|
||||||
1996-05-01,grain,5219
|
|
||||||
1996-06-01,grain,4836
|
|
||||||
1996-07-01,grain,4941
|
|
||||||
1996-08-01,grain,5062
|
|
||||||
1996-09-01,grain,4365
|
|
||||||
1996-10-01,grain,5012
|
|
||||||
1996-11-01,grain,4850
|
|
||||||
1996-12-01,grain,5097
|
|
||||||
1997-01-01,grain,3758
|
|
||||||
1997-02-01,grain,3825
|
|
||||||
1997-03-01,grain,4454
|
|
||||||
1997-04-01,grain,4635
|
|
||||||
1997-05-01,grain,5210
|
|
||||||
1997-06-01,grain,5057
|
|
||||||
1997-07-01,grain,5231
|
|
||||||
1997-08-01,grain,5034
|
|
||||||
1997-09-01,grain,4970
|
|
||||||
1997-10-01,grain,5342
|
|
||||||
1997-11-01,grain,4831
|
|
||||||
1997-12-01,grain,5965
|
|
||||||
1998-01-01,grain,3796
|
|
||||||
1998-02-01,grain,4019
|
|
||||||
1998-03-01,grain,4898
|
|
||||||
1998-04-01,grain,5090
|
|
||||||
1998-05-01,grain,5237
|
|
||||||
1998-06-01,grain,5447
|
|
||||||
1998-07-01,grain,5435
|
|
||||||
1998-08-01,grain,5107
|
|
||||||
1998-09-01,grain,5515
|
|
||||||
1998-10-01,grain,5583
|
|
||||||
1998-11-01,grain,5346
|
|
||||||
1998-12-01,grain,6286
|
|
||||||
1999-01-01,grain,4032
|
|
||||||
1999-02-01,grain,4435
|
|
||||||
1999-03-01,grain,5479
|
|
||||||
1999-04-01,grain,5483
|
|
||||||
1999-05-01,grain,5587
|
|
||||||
1999-06-01,grain,6176
|
|
||||||
1999-07-01,grain,5621
|
|
||||||
1999-08-01,grain,5889
|
|
||||||
1999-09-01,grain,5828
|
|
||||||
1999-10-01,grain,5849
|
|
||||||
1999-11-01,grain,6180
|
|
||||||
1999-12-01,grain,6771
|
|
||||||
2000-01-01,grain,4243
|
|
||||||
2000-02-01,grain,4952
|
|
||||||
2000-03-01,grain,6008
|
|
||||||
2000-04-01,grain,5353
|
|
||||||
2000-05-01,grain,6435
|
|
||||||
2000-06-01,grain,6673
|
|
||||||
2000-07-01,grain,5636
|
|
||||||
2000-08-01,grain,6630
|
|
||||||
2000-09-01,grain,5887
|
|
||||||
2000-10-01,grain,6322
|
|
||||||
2000-11-01,grain,6520
|
|
||||||
2000-12-01,grain,6678
|
|
||||||
2001-01-01,grain,5082
|
|
||||||
2001-02-01,grain,5216
|
|
||||||
2001-03-01,grain,5893
|
|
||||||
2001-04-01,grain,5894
|
|
||||||
2001-05-01,grain,6799
|
|
||||||
2001-06-01,grain,6667
|
|
||||||
2001-07-01,grain,6374
|
|
||||||
2001-08-01,grain,6840
|
|
||||||
2001-09-01,grain,5575
|
|
||||||
2001-10-01,grain,6545
|
|
||||||
2001-11-01,grain,6789
|
|
||||||
2001-12-01,grain,7180
|
|
||||||
2002-01-01,grain,5117
|
|
||||||
2002-02-01,grain,5442
|
|
||||||
2002-03-01,grain,6337
|
|
||||||
2002-04-01,grain,6525
|
|
||||||
2002-05-01,grain,7216
|
|
||||||
2002-06-01,grain,6761
|
|
||||||
2002-07-01,grain,6958
|
|
||||||
2002-08-01,grain,7070
|
|
||||||
2002-09-01,grain,6148
|
|
||||||
2002-10-01,grain,6924
|
|
||||||
2002-11-01,grain,6716
|
|
||||||
2002-12-01,grain,7975
|
|
||||||
2003-01-01,grain,5326
|
|
||||||
2003-02-01,grain,5609
|
|
||||||
2003-03-01,grain,6414
|
|
||||||
2003-04-01,grain,6741
|
|
||||||
2003-05-01,grain,7144
|
|
||||||
2003-06-01,grain,7133
|
|
||||||
2003-07-01,grain,7568
|
|
||||||
2003-08-01,grain,7266
|
|
||||||
2003-09-01,grain,6634
|
|
||||||
2003-10-01,grain,7626
|
|
||||||
2003-11-01,grain,6843
|
|
||||||
2003-12-01,grain,8540
|
|
||||||
2004-01-01,grain,5629
|
|
||||||
2004-02-01,grain,5898
|
|
||||||
2004-03-01,grain,7045
|
|
||||||
2004-04-01,grain,7094
|
|
||||||
2004-05-01,grain,7333
|
|
||||||
2004-06-01,grain,7918
|
|
||||||
2004-07-01,grain,7289
|
|
||||||
2004-08-01,grain,7396
|
|
||||||
2004-09-01,grain,7259
|
|
||||||
2004-10-01,grain,7268
|
|
||||||
2004-11-01,grain,7731
|
|
||||||
2004-12-01,grain,9058
|
|
||||||
2005-01-01,grain,5557
|
|
||||||
2005-02-01,grain,6237
|
|
||||||
2005-03-01,grain,7723
|
|
||||||
2005-04-01,grain,7262
|
|
||||||
2005-05-01,grain,8241
|
|
||||||
2005-06-01,grain,8757
|
|
||||||
2005-07-01,grain,7352
|
|
||||||
2005-08-01,grain,8496
|
|
||||||
2005-09-01,grain,7741
|
|
||||||
2005-10-01,grain,7710
|
|
||||||
2005-11-01,grain,8247
|
|
||||||
2005-12-01,grain,8902
|
|
||||||
2006-01-01,grain,6066
|
|
||||||
2006-02-01,grain,6590
|
|
||||||
2006-03-01,grain,7923
|
|
||||||
2006-04-01,grain,7335
|
|
||||||
2006-05-01,grain,8843
|
|
||||||
2006-06-01,grain,9327
|
|
||||||
2006-07-01,grain,7792
|
|
||||||
2006-08-01,grain,9156
|
|
||||||
2006-09-01,grain,8037
|
|
||||||
2006-10-01,grain,8640
|
|
||||||
2006-11-01,grain,9128
|
|
||||||
2006-12-01,grain,9545
|
|
||||||
2007-01-01,grain,6627
|
|
||||||
2007-02-01,grain,6743
|
|
||||||
2007-03-01,grain,8195
|
|
||||||
2007-04-01,grain,7828
|
|
||||||
2007-05-01,grain,9570
|
|
||||||
2007-06-01,grain,9484
|
|
||||||
2007-07-01,grain,8608
|
|
||||||
2007-08-01,grain,9543
|
|
||||||
2007-09-01,grain,8123
|
|
||||||
2007-10-01,grain,9649
|
|
||||||
2007-11-01,grain,9390
|
|
||||||
2007-12-01,grain,10065
|
|
||||||
2008-01-01,grain,7093
|
|
||||||
2008-02-01,grain,7483
|
|
||||||
2008-03-01,grain,8365
|
|
||||||
2008-04-01,grain,8895
|
|
||||||
2008-05-01,grain,9794
|
|
||||||
2008-06-01,grain,9977
|
|
||||||
2008-07-01,grain,9553
|
|
||||||
2008-08-01,grain,9375
|
|
||||||
2008-09-01,grain,9225
|
|
||||||
2008-10-01,grain,9948
|
|
||||||
2008-11-01,grain,8758
|
|
||||||
2008-12-01,grain,10839
|
|
||||||
2009-01-01,grain,7266
|
|
||||||
2009-02-01,grain,7578
|
|
||||||
2009-03-01,grain,8688
|
|
||||||
2009-04-01,grain,9162
|
|
||||||
2009-05-01,grain,9369
|
|
||||||
2009-06-01,grain,10167
|
|
||||||
2009-07-01,grain,9507
|
|
||||||
2009-08-01,grain,8923
|
|
||||||
2009-09-01,grain,9272
|
|
||||||
2009-10-01,grain,9075
|
|
||||||
2009-11-01,grain,8949
|
|
||||||
2009-12-01,grain,10843
|
|
||||||
2010-01-01,grain,6558
|
|
||||||
2010-02-01,grain,7481
|
|
||||||
2010-03-01,grain,9475
|
|
||||||
2010-04-01,grain,9424
|
|
||||||
2010-05-01,grain,9351
|
|
||||||
2010-06-01,grain,10552
|
|
||||||
2010-07-01,grain,9077
|
|
||||||
2010-08-01,grain,9273
|
|
||||||
2010-09-01,grain,9420
|
|
||||||
2010-10-01,grain,9413
|
|
||||||
2010-11-01,grain,9866
|
|
||||||
2010-12-01,grain,11455
|
|
||||||
2011-01-01,grain,6901
|
|
||||||
2011-02-01,grain,8014
|
|
||||||
2011-03-01,grain,9832
|
|
||||||
2011-04-01,grain,9281
|
|
||||||
2011-05-01,grain,9967
|
|
||||||
2011-06-01,grain,11344
|
|
||||||
2011-07-01,grain,9106
|
|
||||||
2011-08-01,grain,10469
|
|
||||||
2011-09-01,grain,10085
|
|
||||||
2011-10-01,grain,9612
|
|
||||||
2011-11-01,grain,10328
|
|
||||||
2011-12-01,grain,11483
|
|
||||||
2012-01-01,grain,7486
|
|
||||||
2012-02-01,grain,8641
|
|
||||||
2012-03-01,grain,9709
|
|
||||||
2012-04-01,grain,9423
|
|
||||||
2012-05-01,grain,11342
|
|
||||||
2012-06-01,grain,11274
|
|
||||||
2012-07-01,grain,9845
|
|
||||||
2012-08-01,grain,11163
|
|
||||||
2012-09-01,grain,9532
|
|
||||||
2012-10-01,grain,10754
|
|
||||||
2012-11-01,grain,10953
|
|
||||||
2012-12-01,grain,11922
|
|
||||||
2013-01-01,grain,8395
|
|
||||||
2013-02-01,grain,8888
|
|
||||||
2013-03-01,grain,10110
|
|
||||||
2013-04-01,grain,10493
|
|
||||||
2013-05-01,grain,12218
|
|
||||||
2013-06-01,grain,11385
|
|
||||||
2013-07-01,grain,11186
|
|
||||||
2013-08-01,grain,11462
|
|
||||||
2013-09-01,grain,10494
|
|
||||||
2013-10-01,grain,11540
|
|
||||||
2013-11-01,grain,11138
|
|
||||||
2013-12-01,grain,12709
|
|
||||||
2014-01-01,grain,8557
|
|
||||||
2014-02-01,grain,9059
|
|
||||||
2014-03-01,grain,10055
|
|
||||||
2014-04-01,grain,10977
|
|
||||||
2014-05-01,grain,11792
|
|
||||||
2014-06-01,grain,11904
|
|
||||||
2014-07-01,grain,10965
|
|
||||||
2014-08-01,grain,10981
|
|
||||||
2014-09-01,grain,10828
|
|
||||||
2014-10-01,grain,11817
|
|
||||||
2014-11-01,grain,10470
|
|
||||||
2014-12-01,grain,13310
|
|
||||||
2015-01-01,grain,8400
|
|
||||||
2015-02-01,grain,9062
|
|
||||||
2015-03-01,grain,10722
|
|
||||||
2015-04-01,grain,11107
|
|
||||||
2015-05-01,grain,11508
|
|
||||||
2015-06-01,grain,12904
|
|
||||||
2015-07-01,grain,11869
|
|
||||||
2015-08-01,grain,11224
|
|
||||||
2015-09-01,grain,12022
|
|
||||||
2015-10-01,grain,11983
|
|
||||||
2015-11-01,grain,11506
|
|
||||||
2015-12-01,grain,14183
|
|
||||||
2016-01-01,grain,8650
|
|
||||||
2016-02-01,grain,10323
|
|
||||||
2016-03-01,grain,12110
|
|
||||||
2016-04-01,grain,11424
|
|
||||||
2016-05-01,grain,12243
|
|
||||||
2016-06-01,grain,13686
|
|
||||||
2016-07-01,grain,10956
|
|
||||||
2016-08-01,grain,12706
|
|
||||||
2016-09-01,grain,12279
|
|
||||||
2016-10-01,grain,11914
|
|
||||||
2016-11-01,grain,13025
|
|
||||||
2016-12-01,grain,14431
|
|
||||||
|
@@ -1,138 +0,0 @@
|
|||||||
import pandas as pd
|
|
||||||
from azureml.core import Environment
|
|
||||||
from azureml.core.conda_dependencies import CondaDependencies
|
|
||||||
from azureml.train.estimator import Estimator
|
|
||||||
from azureml.core.run import Run
|
|
||||||
from azureml.automl.core.shared import constants
|
|
||||||
|
|
||||||
|
|
||||||
def split_fraction_by_grain(df, fraction, time_column_name,
                            grain_column_names=None):
    """Group df by grain and split on the last fraction of rows for each group.

    :param df: The data frame to split. NOTE: a temporary grain column may be
        added to and removed from this frame in place when no grains are given.
    :param fraction: The fraction (0..1) of each group to place in the tail.
    :param time_column_name: The column used to sort rows within each group.
    :param grain_column_names: Optional list of columns identifying each series.
    :return: Tuple (df_head, df_tail) with the leading and trailing rows per group.
    """
    if not grain_column_names:
        # No grains: treat the whole frame as a single series.
        df['tmp_grain_column'] = 'grain'
        grain_column_names = ['tmp_grain_column']

    df_grouped = (df.sort_values(time_column_name)
                  .groupby(grain_column_names, group_keys=False))

    # int(len * fraction) can round down to 0 for small groups; guard on the
    # row count (not the fraction) so the head keeps every row and the tail is
    # empty in that case instead of the reverse.
    def _take_head(dfg):
        n_tail = int(len(dfg) * fraction)
        return dfg.iloc[:-n_tail] if n_tail > 0 else dfg

    def _take_tail(dfg):
        n_tail = int(len(dfg) * fraction)
        return dfg.iloc[-n_tail:] if n_tail > 0 else dfg[:0]

    df_head = df_grouped.apply(_take_head)

    df_tail = df_grouped.apply(_take_tail)

    if 'tmp_grain_column' in grain_column_names:
        # Undo the temporary grain column on all three frames.
        for df2 in (df, df_head, df_tail):
            df2.drop('tmp_grain_column', axis=1, inplace=True)

        grain_column_names.remove('tmp_grain_column')

    return df_head, df_tail
|
|
||||||
|
|
||||||
|
|
||||||
def split_full_for_forecasting(df, time_column_name,
                               grain_column_names=None, test_split=0.2):
    """Split a frame into train/test parts per grain while preserving its index.

    :param df: The frame to split; modified in place only transiently.
    :param time_column_name: The column used to order rows within each grain.
    :param grain_column_names: Optional list of series-identifier columns.
    :param test_split: The fraction of each grain placed in the test part.
    :return: Tuple (train_df, test_df) with the original index restored.
    """
    saved_index_name = df.index.name

    # Stash the index in a column so it survives the grain-wise split.
    # Assumes that there isn't already a column called tmpindex.

    df['tmpindex'] = df.index

    train_df, test_df = split_fraction_by_grain(
        df, test_split, time_column_name, grain_column_names)

    # Restore the original index (and its name) on both halves.
    train_df = train_df.set_index('tmpindex')
    test_df = test_df.set_index('tmpindex')
    train_df.index.name = saved_index_name
    test_df.index.name = saved_index_name

    # Remove the helper column from the caller's frame.
    df.drop('tmpindex', axis=1, inplace=True)

    return train_df, test_df
|
|
||||||
|
|
||||||
|
|
||||||
def get_result_df(remote_run):
    """Summarize completed child runs of remote_run as a frame indexed by algorithm.

    Only completed children that report both 'run_algorithm' and 'score' are
    counted; for each algorithm, only the best-scoring run is kept.

    :param remote_run: The parent run whose children are summarized.
    :return: DataFrame indexed by run_algorithm with run_id, primary_metric, Score.
    """
    summary_df = pd.DataFrame(index=['run_id', 'run_algorithm',
                                     'primary_metric', 'Score'])
    goal_minimize = False
    for child in list(remote_run.get_children(recursive=True)):
        props = child.properties
        completed = child.get_status().lower() == constants.RunState.COMPLETE_RUN
        if not (completed and 'run_algorithm' in props and 'score' in props):
            continue
        # We only count in the completed child runs.
        summary_df[child.id] = [child.id,
                                props['run_algorithm'],
                                props['primary_metric'],
                                float(props['score'])]
        if 'goal' in props:
            # e.g. goal like "..._min" means lower scores are better.
            goal_minimize = props['goal'].split('_')[-1] == 'min'

    summary_df = (summary_df.T
                  .sort_values('Score', ascending=goal_minimize)
                  .drop_duplicates(['run_algorithm'])
                  .set_index('run_algorithm'))
    return summary_df
|
|
||||||
|
|
||||||
|
|
||||||
def run_inference(test_experiment, compute_target, script_folder, train_run,
                  test_dataset, lookback_dataset, max_horizon,
                  target_column_name, time_column_name, freq):
    """Submit a remote inference run for the model produced by train_run.

    Downloads the trained model and its conda environment file from train_run
    into the local 'inference' folder, builds a matching docker environment,
    and submits an Estimator that executes infer.py on the compute target.

    :param test_experiment: The experiment under which the inference run is submitted.
    :param compute_target: The compute target that executes infer.py.
    :param script_folder: The folder containing infer.py.
    :param train_run: The completed training run to pull the model artifacts from.
    :param test_dataset: Dataset passed to the script as named input 'test_data'.
    :param lookback_dataset: Dataset passed as named input 'lookback_data'.
    :param max_horizon: Forecast horizon forwarded to the script.
    :param target_column_name: Name of the label column forwarded to the script.
    :param time_column_name: Name of the time column forwarded to the script.
    :param freq: Forecast frequency forwarded to the script.
    :return: The submitted test run.
    """
    # Default artifact name; overridden below if the run recorded an explicit
    # model location.
    model_base_name = 'model.pkl'
    if 'model_data_location' in train_run.properties:
        model_location = train_run.properties['model_data_location']
        # Keep only the file name portion of the artifact path.
        _, model_base_name = model_location.rsplit('/', 1)
    train_run.download_file('outputs/{}'.format(model_base_name), 'inference/{}'.format(model_base_name))
    train_run.download_file('outputs/conda_env_v_1_0_0.yml', 'inference/condafile.yml')

    # Recreate the training environment from the downloaded conda file so the
    # model unpickles against the same dependency versions.
    inference_env = Environment("myenv")
    inference_env.docker.enabled = True
    inference_env.python.conda_dependencies = CondaDependencies(
        conda_dependencies_file_path='inference/condafile.yml')

    est = Estimator(source_directory=script_folder,
                    entry_script='infer.py',
                    script_params={
                        '--max_horizon': max_horizon,
                        '--target_column_name': target_column_name,
                        '--time_column_name': time_column_name,
                        '--frequency': freq,
                        '--model_path': model_base_name
                    },
                    inputs=[test_dataset.as_named_input('test_data'),
                            lookback_dataset.as_named_input('lookback_data')],
                    compute_target=compute_target,
                    environment_definition=inference_env)

    # Tag the test run so it can be traced back to its training run.
    run = test_experiment.submit(
        est, tags={
            'training_run_id': train_run.id,
            'run_algorithm': train_run.properties['run_algorithm'],
            'valid_score': train_run.properties['score'],
            'primary_metric': train_run.properties['primary_metric']
        })

    run.log("run_algorithm", run.tags['run_algorithm'])
    return run
|
|
||||||
|
|
||||||
|
|
||||||
def run_multiple_inferences(summary_df, train_experiment, test_experiment,
                            compute_target, script_folder, test_dataset,
                            lookback_dataset, max_horizon, target_column_name,
                            time_column_name, freq):
    """Submit one inference run for every row of summary_df.

    :param summary_df: Frame produced by get_result_df; gains a 'test_run_id' column.
    :param train_experiment: The experiment that owns the training runs.
    :param test_experiment: The experiment to submit inference runs under.
    :return: summary_df with test run ids recorded per training run.
    """
    for algo_name, row in summary_df.iterrows():
        print(algo_name)
        print(row)
        training_run_id = row.run_id
        train_run = Run(train_experiment, training_run_id)

        test_run = run_inference(
            test_experiment, compute_target, script_folder, train_run,
            test_dataset, lookback_dataset, max_horizon, target_column_name,
            time_column_name, freq)

        print(test_run)
        # Record the submitted test run against its training run.
        summary_df.loc[summary_df.run_id == training_run_id,
                       'test_run_id'] = test_run.id

    return summary_df
|
|
||||||
@@ -1,342 +0,0 @@
|
|||||||
import argparse
|
|
||||||
import os
|
|
||||||
|
|
||||||
import numpy as np
|
|
||||||
import pandas as pd
|
|
||||||
|
|
||||||
from pandas.tseries.frequencies import to_offset
|
|
||||||
from sklearn.externals import joblib
|
|
||||||
from sklearn.metrics import mean_absolute_error, mean_squared_error
|
|
||||||
|
|
||||||
from azureml.automl.runtime.shared.score import scoring, constants
|
|
||||||
from azureml.core import Run
|
|
||||||
|
|
||||||
try:
|
|
||||||
import torch
|
|
||||||
|
|
||||||
_torch_present = True
|
|
||||||
except ImportError:
|
|
||||||
_torch_present = False
|
|
||||||
|
|
||||||
|
|
||||||
def align_outputs(y_predicted, X_trans, X_test, y_test,
|
|
||||||
predicted_column_name='predicted',
|
|
||||||
horizon_colname='horizon_origin'):
|
|
||||||
"""
|
|
||||||
Demonstrates how to get the output aligned to the inputs
|
|
||||||
using pandas indexes. Helps understand what happened if
|
|
||||||
the output's shape differs from the input shape, or if
|
|
||||||
the data got re-sorted by time and grain during forecasting.
|
|
||||||
|
|
||||||
Typical causes of misalignment are:
|
|
||||||
* we predicted some periods that were missing in actuals -> drop from eval
|
|
||||||
* model was asked to predict past max_horizon -> increase max horizon
|
|
||||||
* data at start of X_test was needed for lags -> provide previous periods
|
|
||||||
"""
|
|
||||||
if (horizon_colname in X_trans):
|
|
||||||
df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
|
|
||||||
horizon_colname: X_trans[horizon_colname]})
|
|
||||||
else:
|
|
||||||
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
|
|
||||||
|
|
||||||
# y and X outputs are aligned by forecast() function contract
|
|
||||||
df_fcst.index = X_trans.index
|
|
||||||
|
|
||||||
# align original X_test to y_test
|
|
||||||
X_test_full = X_test.copy()
|
|
||||||
X_test_full[target_column_name] = y_test
|
|
||||||
|
|
||||||
# X_test_full's index does not include origin, so reset for merge
|
|
||||||
df_fcst.reset_index(inplace=True)
|
|
||||||
X_test_full = X_test_full.reset_index().drop(columns='index')
|
|
||||||
together = df_fcst.merge(X_test_full, how='right')
|
|
||||||
|
|
||||||
# drop rows where prediction or actuals are nan
|
|
||||||
# happens because of missing actuals
|
|
||||||
# or at edges of time due to lags/rolling windows
|
|
||||||
clean = together[together[[target_column_name,
|
|
||||||
predicted_column_name]].notnull().all(axis=1)]
|
|
||||||
return (clean)
|
|
||||||
|
|
||||||
|
|
||||||
def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
|
|
||||||
max_horizon, X_lookback, y_lookback,
|
|
||||||
freq='D'):
|
|
||||||
"""
|
|
||||||
Produce forecasts on a rolling origin over the given test set.
|
|
||||||
|
|
||||||
Each iteration makes a forecast for the next 'max_horizon' periods
|
|
||||||
with respect to the current origin, then advances the origin by the
|
|
||||||
horizon time duration. The prediction context for each forecast is set so
|
|
||||||
that the forecaster uses the actual target values prior to the current
|
|
||||||
origin time for constructing lag features.
|
|
||||||
|
|
||||||
This function returns a concatenated DataFrame of rolling forecasts.
|
|
||||||
"""
|
|
||||||
print("Using lookback of size: ", y_lookback.size)
|
|
||||||
df_list = []
|
|
||||||
origin_time = X_test[time_column_name].min()
|
|
||||||
X = X_lookback.append(X_test)
|
|
||||||
y = np.concatenate((y_lookback, y_test), axis=0)
|
|
||||||
while origin_time <= X_test[time_column_name].max():
|
|
||||||
# Set the horizon time - end date of the forecast
|
|
||||||
horizon_time = origin_time + max_horizon * to_offset(freq)
|
|
||||||
|
|
||||||
# Extract test data from an expanding window up-to the horizon
|
|
||||||
expand_wind = (X[time_column_name] < horizon_time)
|
|
||||||
X_test_expand = X[expand_wind]
|
|
||||||
y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
|
|
||||||
y_query_expand.fill(np.NaN)
|
|
||||||
|
|
||||||
if origin_time != X[time_column_name].min():
|
|
||||||
# Set the context by including actuals up-to the origin time
|
|
||||||
test_context_expand_wind = (X[time_column_name] < origin_time)
|
|
||||||
context_expand_wind = (X_test_expand[time_column_name] < origin_time)
|
|
||||||
y_query_expand[context_expand_wind] = y[test_context_expand_wind]
|
|
||||||
|
|
||||||
# Print some debug info
|
|
||||||
print("Horizon_time:", horizon_time,
|
|
||||||
" origin_time: ", origin_time,
|
|
||||||
" max_horizon: ", max_horizon,
|
|
||||||
" freq: ", freq)
|
|
||||||
print("expand_wind: ", expand_wind)
|
|
||||||
print("y_query_expand")
|
|
||||||
print(y_query_expand)
|
|
||||||
print("X_test")
|
|
||||||
print(X)
|
|
||||||
print("X_test_expand")
|
|
||||||
print(X_test_expand)
|
|
||||||
print("Type of X_test_expand: ", type(X_test_expand))
|
|
||||||
print("Type of y_query_expand: ", type(y_query_expand))
|
|
||||||
|
|
||||||
print("y_query_expand")
|
|
||||||
print(y_query_expand)
|
|
||||||
|
|
||||||
# Make a forecast out to the maximum horizon
|
|
||||||
# y_fcst, X_trans = y_query_expand, X_test_expand
|
|
||||||
y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)
|
|
||||||
|
|
||||||
print("y_fcst")
|
|
||||||
print(y_fcst)
|
|
||||||
|
|
||||||
# Align forecast with test set for dates within
|
|
||||||
# the current rolling window
|
|
||||||
trans_tindex = X_trans.index.get_level_values(time_column_name)
|
|
||||||
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
|
|
||||||
test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
|
|
||||||
df_list.append(align_outputs(
|
|
||||||
y_fcst[trans_roll_wind], X_trans[trans_roll_wind],
|
|
||||||
X[test_roll_wind], y[test_roll_wind]))
|
|
||||||
|
|
||||||
# Advance the origin time
|
|
||||||
origin_time = horizon_time
|
|
||||||
|
|
||||||
return pd.concat(df_list, ignore_index=True)
|
|
||||||
|
|
||||||
|
|
||||||
def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
|
|
||||||
"""
|
|
||||||
Produce forecasts on a rolling origin over the given test set.
|
|
||||||
|
|
||||||
Each iteration makes a forecast for the next 'max_horizon' periods
|
|
||||||
with respect to the current origin, then advances the origin by the
|
|
||||||
horizon time duration. The prediction context for each forecast is set so
|
|
||||||
that the forecaster uses the actual target values prior to the current
|
|
||||||
origin time for constructing lag features.
|
|
||||||
|
|
||||||
This function returns a concatenated DataFrame of rolling forecasts.
|
|
||||||
"""
|
|
||||||
df_list = []
|
|
||||||
origin_time = X_test[time_column_name].min()
|
|
||||||
while origin_time <= X_test[time_column_name].max():
|
|
||||||
# Set the horizon time - end date of the forecast
|
|
||||||
horizon_time = origin_time + max_horizon * to_offset(freq)
|
|
||||||
|
|
||||||
# Extract test data from an expanding window up-to the horizon
|
|
||||||
expand_wind = (X_test[time_column_name] < horizon_time)
|
|
||||||
X_test_expand = X_test[expand_wind]
|
|
||||||
y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
|
|
||||||
y_query_expand.fill(np.NaN)
|
|
||||||
|
|
||||||
if origin_time != X_test[time_column_name].min():
|
|
||||||
# Set the context by including actuals up-to the origin time
|
|
||||||
test_context_expand_wind = (X_test[time_column_name] < origin_time)
|
|
||||||
context_expand_wind = (X_test_expand[time_column_name] < origin_time)
|
|
||||||
y_query_expand[context_expand_wind] = y_test[
|
|
||||||
test_context_expand_wind]
|
|
||||||
|
|
||||||
# Print some debug info
|
|
||||||
print("Horizon_time:", horizon_time,
|
|
||||||
" origin_time: ", origin_time,
|
|
||||||
" max_horizon: ", max_horizon,
|
|
||||||
" freq: ", freq)
|
|
||||||
print("expand_wind: ", expand_wind)
|
|
||||||
print("y_query_expand")
|
|
||||||
print(y_query_expand)
|
|
||||||
print("X_test")
|
|
||||||
print(X_test)
|
|
||||||
print("X_test_expand")
|
|
||||||
print(X_test_expand)
|
|
||||||
print("Type of X_test_expand: ", type(X_test_expand))
|
|
||||||
print("Type of y_query_expand: ", type(y_query_expand))
|
|
||||||
print("y_query_expand")
|
|
||||||
print(y_query_expand)
|
|
||||||
|
|
||||||
# Make a forecast out to the maximum horizon
|
|
||||||
y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)
|
|
||||||
|
|
||||||
print("y_fcst")
|
|
||||||
print(y_fcst)
|
|
||||||
|
|
||||||
# Align forecast with test set for dates within the
|
|
||||||
# current rolling window
|
|
||||||
trans_tindex = X_trans.index.get_level_values(time_column_name)
|
|
||||||
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
|
|
||||||
test_roll_wind = expand_wind & (X_test[time_column_name] >= origin_time)
|
|
||||||
df_list.append(align_outputs(y_fcst[trans_roll_wind],
|
|
||||||
X_trans[trans_roll_wind],
|
|
||||||
X_test[test_roll_wind],
|
|
||||||
y_test[test_roll_wind]))
|
|
||||||
|
|
||||||
# Advance the origin time
|
|
||||||
origin_time = horizon_time
|
|
||||||
|
|
||||||
return pd.concat(df_list, ignore_index=True)
|
|
||||||
|
|
||||||
|
|
||||||
def APE(actual, pred):
|
|
||||||
"""
|
|
||||||
Calculate absolute percentage error.
|
|
||||||
Returns a vector of APE values with same length as actual/pred.
|
|
||||||
"""
|
|
||||||
return 100 * np.abs((actual - pred) / actual)
|
|
||||||
|
|
||||||
|
|
||||||
def MAPE(actual, pred):
|
|
||||||
"""
|
|
||||||
Calculate mean absolute percentage error.
|
|
||||||
Remove NA and values where actual is close to zero
|
|
||||||
"""
|
|
||||||
not_na = ~(np.isnan(actual) | np.isnan(pred))
|
|
||||||
not_zero = ~np.isclose(actual, 0.0)
|
|
||||||
actual_safe = actual[not_na & not_zero]
|
|
||||||
pred_safe = pred[not_na & not_zero]
|
|
||||||
return np.mean(APE(actual_safe, pred_safe))
|
|
||||||
|
|
||||||
|
|
||||||
def map_location_cuda(storage, loc):
|
|
||||||
return storage.cuda()
|
|
||||||
|
|
||||||
|
|
||||||
parser = argparse.ArgumentParser()
|
|
||||||
parser.add_argument(
|
|
||||||
'--max_horizon', type=int, dest='max_horizon',
|
|
||||||
default=10, help='Max Horizon for forecasting')
|
|
||||||
parser.add_argument(
|
|
||||||
'--target_column_name', type=str, dest='target_column_name',
|
|
||||||
help='Target Column Name')
|
|
||||||
parser.add_argument(
|
|
||||||
'--time_column_name', type=str, dest='time_column_name',
|
|
||||||
help='Time Column Name')
|
|
||||||
parser.add_argument(
|
|
||||||
'--frequency', type=str, dest='freq',
|
|
||||||
help='Frequency of prediction')
|
|
||||||
parser.add_argument(
|
|
||||||
'--model_path', type=str, dest='model_path',
|
|
||||||
default='model.pkl', help='Filename of model to be loaded')
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
max_horizon = args.max_horizon
|
|
||||||
target_column_name = args.target_column_name
|
|
||||||
time_column_name = args.time_column_name
|
|
||||||
freq = args.freq
|
|
||||||
model_path = args.model_path
|
|
||||||
|
|
||||||
print('args passed are: ')
|
|
||||||
print(max_horizon)
|
|
||||||
print(target_column_name)
|
|
||||||
print(time_column_name)
|
|
||||||
print(freq)
|
|
||||||
print(model_path)
|
|
||||||
|
|
||||||
run = Run.get_context()
|
|
||||||
# get input dataset by name
|
|
||||||
test_dataset = run.input_datasets['test_data']
|
|
||||||
lookback_dataset = run.input_datasets['lookback_data']
|
|
||||||
|
|
||||||
grain_column_names = []
|
|
||||||
|
|
||||||
df = test_dataset.to_pandas_dataframe()
|
|
||||||
|
|
||||||
print('Read df')
|
|
||||||
print(df)
|
|
||||||
|
|
||||||
X_test_df = test_dataset.drop_columns(columns=[target_column_name])
|
|
||||||
y_test_df = test_dataset.with_timestamp_columns(
|
|
||||||
None).keep_columns(columns=[target_column_name])
|
|
||||||
|
|
||||||
X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
|
|
||||||
y_lookback_df = lookback_dataset.with_timestamp_columns(
|
|
||||||
None).keep_columns(columns=[target_column_name])
|
|
||||||
|
|
||||||
_, ext = os.path.splitext(model_path)
|
|
||||||
if ext == '.pt':
|
|
||||||
# Load the fc-tcn torch model.
|
|
||||||
assert _torch_present
|
|
||||||
if torch.cuda.is_available():
|
|
||||||
map_location = map_location_cuda
|
|
||||||
else:
|
|
||||||
map_location = 'cpu'
|
|
||||||
with open(model_path, 'rb') as fh:
|
|
||||||
fitted_model = torch.load(fh, map_location=map_location)
|
|
||||||
else:
|
|
||||||
# Load the sklearn pipeline.
|
|
||||||
fitted_model = joblib.load(model_path)
|
|
||||||
|
|
||||||
if hasattr(fitted_model, 'get_lookback'):
|
|
||||||
lookback = fitted_model.get_lookback()
|
|
||||||
df_all = do_rolling_forecast_with_lookback(
|
|
||||||
fitted_model,
|
|
||||||
X_test_df.to_pandas_dataframe(),
|
|
||||||
y_test_df.to_pandas_dataframe().values.T[0],
|
|
||||||
max_horizon,
|
|
||||||
X_lookback_df.to_pandas_dataframe()[-lookback:],
|
|
||||||
y_lookback_df.to_pandas_dataframe().values.T[0][-lookback:],
|
|
||||||
freq)
|
|
||||||
else:
|
|
||||||
df_all = do_rolling_forecast(
|
|
||||||
fitted_model,
|
|
||||||
X_test_df.to_pandas_dataframe(),
|
|
||||||
y_test_df.to_pandas_dataframe().values.T[0],
|
|
||||||
max_horizon,
|
|
||||||
freq)
|
|
||||||
|
|
||||||
print(df_all)
|
|
||||||
|
|
||||||
print("target values:::")
|
|
||||||
print(df_all[target_column_name])
|
|
||||||
print("predicted values:::")
|
|
||||||
print(df_all['predicted'])
|
|
||||||
|
|
||||||
# Use the AutoML scoring module
|
|
||||||
regression_metrics = list(constants.REGRESSION_SCALAR_SET)
|
|
||||||
y_test = np.array(df_all[target_column_name])
|
|
||||||
y_pred = np.array(df_all['predicted'])
|
|
||||||
scores = scoring.score_regression(y_test, y_pred, regression_metrics)
|
|
||||||
|
|
||||||
print("scores:")
|
|
||||||
print(scores)
|
|
||||||
|
|
||||||
for key, value in scores.items():
|
|
||||||
run.log(key, value)
|
|
||||||
|
|
||||||
print("Simple forecasting model")
|
|
||||||
rmse = np.sqrt(mean_squared_error(
|
|
||||||
df_all[target_column_name], df_all['predicted']))
|
|
||||||
print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
|
|
||||||
mae = mean_absolute_error(df_all[target_column_name], df_all['predicted'])
|
|
||||||
print('mean_absolute_error score: %.2f' % mae)
|
|
||||||
print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted']))
|
|
||||||
|
|
||||||
run.log('rmse', rmse)
|
|
||||||
run.log('mae', mae)
|
|
||||||
@@ -16,6 +16,13 @@
|
|||||||
""
|
""
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<font color=\"red\" size=\"5\"><strong>!Important!</strong> </br>This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-task-bike-share)).</font>"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -42,7 +49,7 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"AutoML highlights here include built-in holiday featurization, accessing engineered feature names, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
|
"AutoML highlights here include built-in holiday featurization, accessing engineered feature names, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Make sure you have executed the [configuration notebook](../../../configuration.ipynb) before running this notebook.\n",
|
"Make sure you have executed the [configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) before running this notebook.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Notebook synopsis:\n",
|
"Notebook synopsis:\n",
|
||||||
"1. Creating an Experiment in an existing Workspace\n",
|
"1. Creating an Experiment in an existing Workspace\n",
|
||||||
@@ -61,24 +68,30 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1680248038565
|
||||||
|
}
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import azureml.core\n",
|
"import json\n",
|
||||||
"import pandas as pd\n",
|
|
||||||
"import numpy as np\n",
|
|
||||||
"import logging\n",
|
"import logging\n",
|
||||||
|
"from datetime import datetime\n",
|
||||||
"\n",
|
"\n",
|
||||||
"from azureml.core import Workspace, Experiment, Dataset\n",
|
"import azureml.core\n",
|
||||||
"from azureml.train.automl import AutoMLConfig\n",
|
"import numpy as np\n",
|
||||||
"from datetime import datetime"
|
"import pandas as pd\n",
|
||||||
|
"from azureml.automl.core.featurization import FeaturizationConfig\n",
|
||||||
|
"from azureml.core import Dataset, Experiment, Workspace\n",
|
||||||
|
"from azureml.train.automl import AutoMLConfig"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -87,7 +100,6 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
|
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -107,19 +119,20 @@
|
|||||||
"ws = Workspace.from_config()\n",
|
"ws = Workspace.from_config()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# choose a name for the run history container in the workspace\n",
|
"# choose a name for the run history container in the workspace\n",
|
||||||
"experiment_name = 'automl-bikeshareforecasting'\n",
|
"experiment_name = \"automl-bikeshareforecasting\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"experiment = Experiment(ws, experiment_name)\n",
|
"experiment = Experiment(ws, experiment_name)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"output = {}\n",
|
"output = {}\n",
|
||||||
"output['Subscription ID'] = ws.subscription_id\n",
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
"output['Workspace'] = ws.name\n",
|
"output[\"Workspace\"] = ws.name\n",
|
||||||
"output['SKU'] = ws.sku\n",
|
"output[\"SKU\"] = ws.sku\n",
|
||||||
"output['Resource Group'] = ws.resource_group\n",
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
"output['Location'] = ws.location\n",
|
"output[\"Location\"] = ws.location\n",
|
||||||
"output['Run History Name'] = experiment_name\n",
|
"output[\"Run History Name\"] = experiment_name\n",
|
||||||
"pd.set_option('display.max_colwidth', -1)\n",
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
"outputDf.T"
|
"outputDf.T"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -129,6 +142,9 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"## Compute\n",
|
"## Compute\n",
|
||||||
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||||
|
"\n",
|
||||||
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||||
@@ -149,10 +165,11 @@
|
|||||||
"# Verify that cluster does not exist already\n",
|
"# Verify that cluster does not exist already\n",
|
||||||
"try:\n",
|
"try:\n",
|
||||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||||
" print('Found existing cluster, use it.')\n",
|
" print(\"Found existing cluster, use it.\")\n",
|
||||||
"except ComputeTargetException:\n",
|
"except ComputeTargetException:\n",
|
||||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
|
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||||
" max_nodes=4)\n",
|
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
|
||||||
|
" )\n",
|
||||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"compute_target.wait_for_completion(show_output=True)"
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
@@ -164,23 +181,6 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"## Data\n",
|
"## Data\n",
|
||||||
"\n",
|
"\n",
|
||||||
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace) is paired with the storage account, which contains the default data store. We will use it to upload the bike share data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"datastore = ws.get_default_datastore()\n",
|
|
||||||
"datastore.upload_files(files = ['./bike-no.csv'], target_path = 'dataset/', overwrite = True,show_progress = True)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Let's set up what we know about the dataset. \n",
|
"Let's set up what we know about the dataset. \n",
|
||||||
"\n",
|
"\n",
|
||||||
"**Target column** is what we want to forecast.\n",
|
"**Target column** is what we want to forecast.\n",
|
||||||
@@ -194,27 +194,54 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"target_column_name = 'cnt'\n",
|
"target_column_name = \"cnt\"\n",
|
||||||
"time_column_name = 'date'"
|
"time_column_name = \"date\""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"nteract": {
|
||||||
|
"transient": {
|
||||||
|
"deleting": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"source": [
|
||||||
|
"You are now ready to load the historical bike share data. We will load the CSV file into a plain pandas DataFrame."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"jupyter": {
|
||||||
|
"outputs_hidden": false,
|
||||||
|
"source_hidden": false
|
||||||
|
},
|
||||||
|
"nteract": {
|
||||||
|
"transient": {
|
||||||
|
"deleting": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'dataset/bike-no.csv')]).with_timestamp_columns(fine_grain_timestamp=time_column_name) \n",
|
"all_data = pd.read_csv(\"bike-no.csv\", parse_dates=[time_column_name])\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Drop the columns 'casual' and 'registered' as these columns are a breakdown of the total and therefore a leak.\n",
|
"# Drop the columns 'casual' and 'registered' as these columns are a breakdown of the total and therefore a leak.\n",
|
||||||
"dataset = dataset.drop_columns(columns=['casual', 'registered'])\n",
|
"all_data.drop([\"casual\", \"registered\"], axis=1, inplace=True)"
|
||||||
"\n",
|
|
||||||
"dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"nteract": {
|
||||||
|
"transient": {
|
||||||
|
"deleting": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"### Split the data\n",
|
"### Split the data\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -224,22 +251,63 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1680247376789
|
||||||
|
},
|
||||||
|
"jupyter": {
|
||||||
|
"outputs_hidden": false,
|
||||||
|
"source_hidden": false
|
||||||
|
},
|
||||||
|
"nteract": {
|
||||||
|
"transient": {
|
||||||
|
"deleting": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# select data that occurs before a specified date\n",
|
"# select data that occurs before a specified date\n",
|
||||||
"train = dataset.time_before(datetime(2012, 8, 31), include_boundary=True)\n",
|
"train = all_data[all_data[time_column_name] <= pd.Timestamp(\"2012-08-31\")].copy()\n",
|
||||||
"train.to_pandas_dataframe().tail(5).reset_index(drop=True)"
|
"test = all_data[all_data[time_column_name] >= pd.Timestamp(\"2012-09-01\")].copy()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Upload data to datastore\n",
|
||||||
|
"\n",
|
||||||
|
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace) is paired with the storage account, which contains the default data store. We will use it to upload the bike share data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"jupyter": {
|
||||||
|
"outputs_hidden": false,
|
||||||
|
"source_hidden": false
|
||||||
|
},
|
||||||
|
"nteract": {
|
||||||
|
"transient": {
|
||||||
|
"deleting": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"test = dataset.time_after(datetime(2012, 9, 1), include_boundary=True)\n",
|
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||||
"test.to_pandas_dataframe().head(5).reset_index(drop=True)"
|
"\n",
|
||||||
|
"datastore = ws.get_default_datastore()\n",
|
||||||
|
"\n",
|
||||||
|
"train_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||||
|
" train, target=(datastore, \"dataset/\"), name=\"bike_no_train\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"test_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||||
|
" test, target=(datastore, \"dataset/\"), name=\"bike_no_test\"\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -255,7 +323,8 @@
|
|||||||
"|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
|
"|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
|
||||||
"|**country_or_region_for_holidays**|The country/region used to generate holiday features. These should be ISO 3166 two-letter country/region codes (i.e. 'US', 'GB').|\n",
|
"|**country_or_region_for_holidays**|The country/region used to generate holiday features. These should be ISO 3166 two-letter country/region codes (i.e. 'US', 'GB').|\n",
|
||||||
"|**target_lags**|The target_lags specifies how far back we will construct the lags of the target variable.|\n",
|
"|**target_lags**|The target_lags specifies how far back we will construct the lags of the target variable.|\n",
|
||||||
"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information."
|
"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.\n",
|
||||||
|
"|**cv_step_size**|Number of periods between two consecutive cross-validation folds. The default value is \"auto\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -275,7 +344,7 @@
|
|||||||
"|**training_data**|Input dataset, containing both features and label column.|\n",
|
"|**training_data**|Input dataset, containing both features and label column.|\n",
|
||||||
"|**label_column_name**|The name of the label column.|\n",
|
"|**label_column_name**|The name of the label column.|\n",
|
||||||
"|**compute_target**|The remote compute for training.|\n",
|
"|**compute_target**|The remote compute for training.|\n",
|
||||||
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
"|**n_cross_validations**|Number of cross-validation folds to use for model/pipeline selection. The default value is \"auto\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value.\n",
|
||||||
"|**enable_early_stopping**|If early stopping is on, training will stop when the primary metric is no longer improving.|\n",
|
"|**enable_early_stopping**|If early stopping is on, training will stop when the primary metric is no longer improving.|\n",
|
||||||
"|**forecasting_parameters**|A class that holds all the forecasting related parameters.|\n",
|
"|**forecasting_parameters**|A class that holds all the forecasting related parameters.|\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -300,6 +369,25 @@
|
|||||||
"forecast_horizon = 14"
|
"forecast_horizon = 14"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Convert prediction type to integer\n",
|
||||||
|
"The featurization configuration can be used to change the default prediction type from decimal numbers to integer. This customization can be used in the scenario when the target column is expected to contain whole values as the number of rented bikes per day."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"featurization_config = FeaturizationConfig()\n",
|
||||||
|
"# Force the target column, to be integer type.\n",
|
||||||
|
"featurization_config.add_prediction_transform_type(\"Integer\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -314,26 +402,32 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||||
|
"\n",
|
||||||
"forecasting_parameters = ForecastingParameters(\n",
|
"forecasting_parameters = ForecastingParameters(\n",
|
||||||
" time_column_name=time_column_name,\n",
|
" time_column_name=time_column_name,\n",
|
||||||
" forecast_horizon=forecast_horizon,\n",
|
" forecast_horizon=forecast_horizon,\n",
|
||||||
" country_or_region_for_holidays='US', # set country_or_region will trigger holiday featurizer\n",
|
" country_or_region_for_holidays=\"US\", # set country_or_region will trigger holiday featurizer\n",
|
||||||
" target_lags='auto' # use heuristic based lag setting \n",
|
" target_lags=\"auto\", # use heuristic based lag setting\n",
|
||||||
|
" freq=\"D\", # Set the forecast frequency to be daily\n",
|
||||||
|
" cv_step_size=\"auto\",\n",
|
||||||
")\n",
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"automl_config = AutoMLConfig(task='forecasting', \n",
|
"automl_config = AutoMLConfig(\n",
|
||||||
" primary_metric='normalized_root_mean_squared_error',\n",
|
" task=\"forecasting\",\n",
|
||||||
" blocked_models = ['ExtremeRandomTrees'], \n",
|
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||||
" experiment_timeout_hours=0.3,\n",
|
" featurization=featurization_config,\n",
|
||||||
" training_data=train,\n",
|
" blocked_models=[\"ExtremeRandomTrees\"],\n",
|
||||||
" label_column_name=target_column_name,\n",
|
" experiment_timeout_hours=0.3,\n",
|
||||||
" compute_target=compute_target,\n",
|
" training_data=train_dataset,\n",
|
||||||
" enable_early_stopping=True,\n",
|
" label_column_name=target_column_name,\n",
|
||||||
" n_cross_validations=3, \n",
|
" compute_target=compute_target,\n",
|
||||||
" max_concurrent_iterations=4,\n",
|
" enable_early_stopping=True,\n",
|
||||||
" max_cores_per_iteration=-1,\n",
|
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||||
" verbosity=logging.INFO,\n",
|
" max_concurrent_iterations=4,\n",
|
||||||
" forecasting_parameters=forecasting_parameters)"
|
" max_cores_per_iteration=-1,\n",
|
||||||
|
" verbosity=logging.INFO,\n",
|
||||||
|
" forecasting_parameters=forecasting_parameters,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -349,8 +443,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"remote_run = experiment.submit(automl_config, show_output=False)\n",
|
"remote_run = experiment.submit(automl_config, show_output=False)"
|
||||||
"remote_run"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -366,8 +459,8 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"### Retrieve the Best Model\n",
|
"### Retrieve the Best Run details\n",
|
||||||
"Below we select the best model from all the training iterations using get_output method."
|
"Below we retrieve the best Run object from among all the runs in the experiment."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -376,8 +469,8 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"best_run, fitted_model = remote_run.get_output()\n",
|
"best_run = remote_run.get_best_child()\n",
|
||||||
"fitted_model.steps"
|
"best_run"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -386,7 +479,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"## Featurization\n",
|
"## Featurization\n",
|
||||||
"\n",
|
"\n",
|
||||||
"You can access the engineered feature names generated in time-series featurization. Note that a number of named holiday periods are represented. We recommend that you have at least one year of data when using this feature to ensure that all yearly holidays are captured in the training featurization."
|
"We can look at the engineered feature names generated in time-series featurization via. the JSON file named 'engineered_feature_names.json' under the run outputs. Note that a number of named holiday periods are represented. We recommend that you have at least one year of data when using this feature to ensure that all yearly holidays are captured in the training featurization."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -395,7 +488,14 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()"
|
"# Download the JSON file locally\n",
|
||||||
|
"best_run.download_file(\n",
|
||||||
|
" \"outputs/engineered_feature_names.json\", \"engineered_feature_names.json\"\n",
|
||||||
|
")\n",
|
||||||
|
"with open(\"engineered_feature_names.json\", \"r\") as f:\n",
|
||||||
|
" records = json.load(f)\n",
|
||||||
|
"\n",
|
||||||
|
"records"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -419,10 +519,26 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Get the featurization summary as a list of JSON\n",
|
"# Download the featurization summary JSON file locally\n",
|
||||||
"featurization_summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()\n",
|
"best_run.download_file(\n",
|
||||||
"# View the featurization summary as a pandas dataframe\n",
|
" \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
|
||||||
"pd.DataFrame.from_records(featurization_summary)"
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# Render the JSON as a pandas DataFrame\n",
|
||||||
|
"with open(\"featurization_summary.json\", \"r\") as f:\n",
|
||||||
|
" records = json.load(f)\n",
|
||||||
|
"fs = pd.DataFrame.from_records(records)\n",
|
||||||
|
"\n",
|
||||||
|
"# View a summary of the featurization\n",
|
||||||
|
"fs[\n",
|
||||||
|
" [\n",
|
||||||
|
" \"RawFeatureName\",\n",
|
||||||
|
" \"TypeDetected\",\n",
|
||||||
|
" \"Dropped\",\n",
|
||||||
|
" \"EngineeredFeatureCount\",\n",
|
||||||
|
" \"Transformations\",\n",
|
||||||
|
" ]\n",
|
||||||
|
"]"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -467,9 +583,9 @@
|
|||||||
"import os\n",
|
"import os\n",
|
||||||
"import shutil\n",
|
"import shutil\n",
|
||||||
"\n",
|
"\n",
|
||||||
"script_folder = os.path.join(os.getcwd(), 'forecast')\n",
|
"script_folder = os.path.join(os.getcwd(), \"forecast\")\n",
|
||||||
"os.makedirs(script_folder, exist_ok=True)\n",
|
"os.makedirs(script_folder, exist_ok=True)\n",
|
||||||
"shutil.copy('forecasting_script.py', script_folder)"
|
"shutil.copy(\"forecasting_script.py\", script_folder)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -487,7 +603,9 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"from run_forecast import run_rolling_forecast\n",
|
"from run_forecast import run_rolling_forecast\n",
|
||||||
"\n",
|
"\n",
|
||||||
"remote_run = run_rolling_forecast(test_experiment, compute_target, best_run, test, target_column_name)\n",
|
"remote_run = run_rolling_forecast(\n",
|
||||||
|
" test_experiment, compute_target, best_run, test_dataset, target_column_name\n",
|
||||||
|
")\n",
|
||||||
"remote_run"
|
"remote_run"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -504,7 +622,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"### Download the prediction result for metrics calcuation\n",
|
"### Download the prediction result for metrics calculation\n",
|
||||||
"The test data with predictions are saved in artifact outputs/predictions.csv. You can download it and calculation some error metrics for the forecasts and vizualize the predictions vs. the actuals."
|
"The test data with predictions are saved in artifact outputs/predictions.csv. You can download it and calculation some error metrics for the forecasts and vizualize the predictions vs. the actuals."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -514,8 +632,33 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"remote_run.download_file('outputs/predictions.csv', 'predictions.csv')\n",
|
"remote_run.download_file(\"outputs/predictions.csv\", \"predictions.csv\")\n",
|
||||||
"df_all = pd.read_csv('predictions.csv')"
|
"fcst_df = pd.read_csv(\"predictions.csv\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Note that the rolling forecast can contain multiple predictions for each date, each from a different forecast origin. For example, consider 2012-09-05:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"fcst_df[fcst_df.date == \"2012-09-05\"]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Here, the forecast origin refers to the latest date of actuals available for a given forecast. The earliest origin in the rolling forecast, 2012-08-31, is the last day in the training data. For origin date 2012-09-01, the forecasts use actual recorded counts from the training data *and* the actual count recorded on 2012-09-01. Note that the model is not retrained for origin dates later than 2012-08-31, but the values for model features, such as lagged values of daily count, are updated.\n",
|
||||||
|
"\n",
|
||||||
|
"Let's calculate the metrics over all rolling forecasts:"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -527,24 +670,17 @@
|
|||||||
"from azureml.automl.core.shared import constants\n",
|
"from azureml.automl.core.shared import constants\n",
|
||||||
"from azureml.automl.runtime.shared.score import scoring\n",
|
"from azureml.automl.runtime.shared.score import scoring\n",
|
||||||
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
|
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
|
||||||
"from matplotlib import pyplot as plt\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"# use automl metrics module\n",
|
"# use automl metrics module\n",
|
||||||
"scores = scoring.score_regression(\n",
|
"scores = scoring.score_regression(\n",
|
||||||
" y_test=df_all[target_column_name],\n",
|
" y_test=fcst_df[target_column_name],\n",
|
||||||
" y_pred=df_all['predicted'],\n",
|
" y_pred=fcst_df[\"predicted\"],\n",
|
||||||
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
|
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print(\"[Test data scores]\\n\")\n",
|
"print(\"[Test data scores]\\n\")\n",
|
||||||
"for key, value in scores.items(): \n",
|
"for key, value in scores.items():\n",
|
||||||
" print('{}: {:.3f}'.format(key, value))\n",
|
" print(\"{}: {:.3f}\".format(key, value))"
|
||||||
" \n",
|
|
||||||
"# Plot outputs\n",
|
|
||||||
"%matplotlib inline\n",
|
|
||||||
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
|
|
||||||
"test_test = plt.scatter(df_all[target_column_name], df_all[target_column_name], color='g')\n",
|
|
||||||
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
|
|
||||||
"plt.show()"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -553,28 +689,15 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"For more details on what metrics are included and how they are calculated, please refer to [supported metrics](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#regressionforecasting-metrics). You could also calculate residuals, like described [here](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#residuals).\n",
|
"For more details on what metrics are included and how they are calculated, please refer to [supported metrics](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#regressionforecasting-metrics). You could also calculate residuals, like described [here](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#residuals).\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"The rolling forecast metric values are very high in comparison to the validation metrics reported by the AutoML job. What's going on here? We will investigate in the following cells!"
|
||||||
"Since we did a rolling evaluation on the test set, we can analyze the predictions by their forecast horizon relative to the rolling origin. The model was initially trained at a forecast horizon of 14, so each prediction from the model is associated with a horizon value from 1 to 14. The horizon values are in a column named, \"horizon_origin,\" in the prediction set. For example, we can calculate some of the error metrics grouped by the horizon:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from metrics_helper import MAPE, APE\n",
|
|
||||||
"df_all.groupby('horizon_origin').apply(\n",
|
|
||||||
" lambda df: pd.Series({'MAPE': MAPE(df[target_column_name], df['predicted']),\n",
|
|
||||||
" 'RMSE': np.sqrt(mean_squared_error(df[target_column_name], df['predicted'])),\n",
|
|
||||||
" 'MAE': mean_absolute_error(df[target_column_name], df['predicted'])}))"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"To drill down more, we can look at the distributions of APE (absolute percentage error) by horizon. From the chart, it is clear that the overall MAPE is being skewed by one particular point where the actual value is of small absolute value."
|
"### Forecast versus actuals plot\n",
|
||||||
|
"We will plot predictions and actuals on a time series plot. Since there are many forecasts for each date, we select the 14-day-ahead forecast from each forecast origin for our comparison."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -583,18 +706,55 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"df_all_APE = df_all.assign(APE=APE(df_all[target_column_name], df_all['predicted']))\n",
|
"from matplotlib import pyplot as plt\n",
|
||||||
"APEs = [df_all_APE[df_all['horizon_origin'] == h].APE.values for h in range(1, forecast_horizon + 1)]\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"%matplotlib inline\n",
|
"%matplotlib inline\n",
|
||||||
"plt.boxplot(APEs)\n",
|
|
||||||
"plt.yscale('log')\n",
|
|
||||||
"plt.xlabel('horizon')\n",
|
|
||||||
"plt.ylabel('APE (%)')\n",
|
|
||||||
"plt.title('Absolute Percentage Errors by Forecast Horizon')\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
|
"fcst_df_h14 = (\n",
|
||||||
|
" fcst_df.groupby(\"forecast_origin\", as_index=False)\n",
|
||||||
|
" .last()\n",
|
||||||
|
" .drop(columns=[\"forecast_origin\"])\n",
|
||||||
|
")\n",
|
||||||
|
"fcst_df_h14.set_index(time_column_name, inplace=True)\n",
|
||||||
|
"plt.plot(fcst_df_h14[[target_column_name, \"predicted\"]])\n",
|
||||||
|
"plt.xticks(rotation=45)\n",
|
||||||
|
"plt.title(f\"Predicted vs. Actuals\")\n",
|
||||||
|
"plt.legend([\"actual\", \"14-day-ahead forecast\"])\n",
|
||||||
"plt.show()"
|
"plt.show()"
|
||||||
]
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Looking at the plot, there are two clear issues:\n",
|
||||||
|
"1. An anomalously low count value on October 29th, 2012.\n",
|
||||||
|
"2. End-of-year holidays (Thanksgiving and Christmas) in late November and late December.\n",
|
||||||
|
"\n",
|
||||||
|
"What happened on Oct. 29th, 2012? That day, Hurricane Sandy brought severe storm surge flooding to the east coast of the United States, particularly around New York City. This is certainly an anomalous event that the model did not account for!\n",
|
||||||
|
"\n",
|
||||||
|
"As for the late year holidays, the model apparently did not learn to account for the full reduction of bike share rentals on these major holidays. The training data covers 2011 and early 2012, so the model fit only had access to a single occurrence of these holidays. This makes it challenging to resolve holiday effects; however, a larger AutoML model search may result in a better model that is more holiday-aware.\n",
|
||||||
|
"\n",
|
||||||
|
"If we filter the predictions prior to the Thanksgiving holiday and remove the anomalous day of 2012-10-29, the metrics are closer to validation levels:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"date_filter = (fcst_df.date != \"2012-10-29\") & (fcst_df.date < \"2012-11-22\")\n",
|
||||||
|
"scores = scoring.score_regression(\n",
|
||||||
|
" y_test=fcst_df[date_filter][target_column_name],\n",
|
||||||
|
" y_pred=fcst_df[date_filter][\"predicted\"],\n",
|
||||||
|
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"[Test data scores (filtered)]\\n\")\n",
|
||||||
|
"for key, value in scores.items():\n",
|
||||||
|
" print(\"{}: {:.3f}\".format(key, value))"
|
||||||
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
@@ -620,10 +780,13 @@
|
|||||||
],
|
],
|
||||||
"friendly_name": "Forecasting BikeShare Demand",
|
"friendly_name": "Forecasting BikeShare Demand",
|
||||||
"index_order": 1,
|
"index_order": 1,
|
||||||
|
"kernel_info": {
|
||||||
|
"name": "python38-azureml"
|
||||||
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3.6",
|
"display_name": "Python 3.8 - AzureML",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python36"
|
"name": "python38-azureml"
|
||||||
},
|
},
|
||||||
"language_info": {
|
"language_info": {
|
||||||
"codemirror_mode": {
|
"codemirror_mode": {
|
||||||
@@ -635,17 +798,30 @@
|
|||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.6.7"
|
"version": "3.8.10"
|
||||||
|
},
|
||||||
|
"microsoft": {
|
||||||
|
"ms_spell_check": {
|
||||||
|
"ms_spell_check_language": "en"
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"mimetype": "text/x-python",
|
"mimetype": "text/x-python",
|
||||||
"name": "python",
|
"name": "python",
|
||||||
"npconvert_exporter": "python",
|
"npconvert_exporter": "python",
|
||||||
|
"nteract": {
|
||||||
|
"version": "nteract-front-end@1.0.0"
|
||||||
|
},
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"tags": [
|
"tags": [
|
||||||
"Forecasting"
|
"Forecasting"
|
||||||
],
|
],
|
||||||
"task": "Forecasting",
|
"task": "Forecasting",
|
||||||
"version": 3
|
"version": 3,
|
||||||
|
"vscode": {
|
||||||
|
"interpreter": {
|
||||||
|
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
|
||||||
|
}
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
"nbformat_minor": 4
|
"nbformat_minor": 4
|
||||||
|
|||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: auto-ml-forecasting-bike-share
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -1,14 +1,17 @@
|
|||||||
import argparse
|
import argparse
|
||||||
from azureml.core import Dataset, Run
|
from azureml.core import Dataset, Run
|
||||||
from sklearn.externals import joblib
|
import joblib
|
||||||
|
|
||||||
parser = argparse.ArgumentParser()
|
parser = argparse.ArgumentParser()
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--target_column_name', type=str, dest='target_column_name',
|
"--target_column_name",
|
||||||
help='Target Column Name')
|
type=str,
|
||||||
|
dest="target_column_name",
|
||||||
|
help="Target Column Name",
|
||||||
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--test_dataset', type=str, dest='test_dataset',
|
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
|
||||||
help='Test Dataset')
|
)
|
||||||
|
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
target_column_name = args.target_column_name
|
target_column_name = args.target_column_name
|
||||||
@@ -20,20 +23,31 @@ ws = run.experiment.workspace
|
|||||||
# get the input dataset by id
|
# get the input dataset by id
|
||||||
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
|
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
|
||||||
|
|
||||||
X_test_df = test_dataset.drop_columns(columns=[target_column_name]).to_pandas_dataframe().reset_index(drop=True)
|
X_test_df = (
|
||||||
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(columns=[target_column_name]).to_pandas_dataframe()
|
test_dataset.drop_columns(columns=[target_column_name])
|
||||||
|
.to_pandas_dataframe()
|
||||||
|
.reset_index(drop=True)
|
||||||
|
)
|
||||||
|
y_test_df = (
|
||||||
|
test_dataset.with_timestamp_columns(None)
|
||||||
|
.keep_columns(columns=[target_column_name])
|
||||||
|
.to_pandas_dataframe()
|
||||||
|
)
|
||||||
|
|
||||||
fitted_model = joblib.load('model.pkl')
|
fitted_model = joblib.load("model.pkl")
|
||||||
|
|
||||||
y_pred, X_trans = fitted_model.rolling_evaluation(X_test_df, y_test_df.values)
|
X_rf = fitted_model.rolling_forecast(X_test_df, y_test_df.values, step=1)
|
||||||
|
|
||||||
# Add predictions, actuals, and horizon relative to rolling origin to the test feature data
|
# Add predictions, actuals, and horizon relative to rolling origin to the test feature data
|
||||||
assign_dict = {'horizon_origin': X_trans['horizon_origin'].values, 'predicted': y_pred,
|
assign_dict = {
|
||||||
target_column_name: y_test_df[target_column_name].values}
|
fitted_model.forecast_origin_column_name: "forecast_origin",
|
||||||
df_all = X_test_df.assign(**assign_dict)
|
fitted_model.forecast_column_name: "predicted",
|
||||||
|
fitted_model.actual_column_name: target_column_name,
|
||||||
|
}
|
||||||
|
X_rf.rename(columns=assign_dict, inplace=True)
|
||||||
|
|
||||||
file_name = 'outputs/predictions.csv'
|
file_name = "outputs/predictions.csv"
|
||||||
export_csv = df_all.to_csv(file_name, header=True)
|
export_csv = X_rf.to_csv(file_name, header=True)
|
||||||
|
|
||||||
# Upload the predictions into artifacts
|
# Upload the predictions into artifacts
|
||||||
run.upload_file(name=file_name, path_or_stream=file_name)
|
run.upload_file(name=file_name, path_or_stream=file_name)
|
||||||
|
|||||||
@@ -1,32 +1,40 @@
|
|||||||
from azureml.core import ScriptRunConfig
|
from azureml.core import ScriptRunConfig
|
||||||
|
|
||||||
|
|
||||||
def run_rolling_forecast(test_experiment, compute_target, train_run,
|
def run_rolling_forecast(
|
||||||
test_dataset, target_column_name,
|
test_experiment,
|
||||||
inference_folder='./forecast'):
|
compute_target,
|
||||||
train_run.download_file('outputs/model.pkl',
|
train_run,
|
||||||
inference_folder + '/model.pkl')
|
test_dataset,
|
||||||
|
target_column_name,
|
||||||
|
inference_folder="./forecast",
|
||||||
|
):
|
||||||
|
train_run.download_file("outputs/model.pkl", inference_folder + "/model.pkl")
|
||||||
|
|
||||||
inference_env = train_run.get_environment()
|
inference_env = train_run.get_environment()
|
||||||
|
|
||||||
config = ScriptRunConfig(source_directory=inference_folder,
|
config = ScriptRunConfig(
|
||||||
script='forecasting_script.py',
|
source_directory=inference_folder,
|
||||||
arguments=['--target_column_name',
|
script="forecasting_script.py",
|
||||||
target_column_name,
|
arguments=[
|
||||||
'--test_dataset',
|
"--target_column_name",
|
||||||
test_dataset.as_named_input(test_dataset.name)],
|
target_column_name,
|
||||||
compute_target=compute_target,
|
"--test_dataset",
|
||||||
environment=inference_env)
|
test_dataset.as_named_input(test_dataset.name),
|
||||||
|
],
|
||||||
|
compute_target=compute_target,
|
||||||
|
environment=inference_env,
|
||||||
|
)
|
||||||
|
|
||||||
run = test_experiment.submit(config,
|
run = test_experiment.submit(
|
||||||
tags={'training_run_id':
|
config,
|
||||||
train_run.id,
|
tags={
|
||||||
'run_algorithm':
|
"training_run_id": train_run.id,
|
||||||
train_run.properties['run_algorithm'],
|
"run_algorithm": train_run.properties["run_algorithm"],
|
||||||
'valid_score':
|
"valid_score": train_run.properties["score"],
|
||||||
train_run.properties['score'],
|
"primary_metric": train_run.properties["primary_metric"],
|
||||||
'primary_metric':
|
},
|
||||||
train_run.properties['primary_metric']})
|
)
|
||||||
|
|
||||||
run.log("run_algorithm", run.tags['run_algorithm'])
|
run.log("run_algorithm", run.tags["run_algorithm"])
|
||||||
return run
|
return run
|
||||||
|
|||||||
@@ -16,6 +16,13 @@
|
|||||||
""
|
""
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<font color=\"red\" size=\"5\"><strong>!Important!</strong> </br>This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/blob/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-task-energy-demand/automl-forecasting-task-energy-demand-advanced-mlflow.ipynb)).</font>"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -24,10 +31,11 @@
|
|||||||
"_**Forecasting using the Energy Demand Dataset**_\n",
|
"_**Forecasting using the Energy Demand Dataset**_\n",
|
||||||
"\n",
|
"\n",
|
||||||
"## Contents\n",
|
"## Contents\n",
|
||||||
"1. [Introduction](#Introduction)\n",
|
"1. [Introduction](#introduction)\n",
|
||||||
"1. [Setup](#Setup)\n",
|
"1. [Setup](#setup)\n",
|
||||||
"1. [Data and Forecasting Configurations](#Data)\n",
|
"1. [Data and Forecasting Configurations](#data)\n",
|
||||||
"1. [Train](#Train)\n",
|
"1. [Train](#train)\n",
|
||||||
|
"1. [Generate and Evaluate the Forecast](#forecast)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Advanced Forecasting\n",
|
"Advanced Forecasting\n",
|
||||||
"1. [Advanced Training](#advanced_training)\n",
|
"1. [Advanced Training](#advanced_training)\n",
|
||||||
@@ -38,26 +46,27 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## Introduction\n",
|
"# Introduction<a id=\"introduction\"></a>\n",
|
||||||
"\n",
|
"\n",
|
||||||
"In this example we use the associated New York City energy demand dataset to showcase how you can use AutoML for a simple forecasting problem and explore the results. The goal is predict the energy demand for the next 48 hours based on historic time-series data.\n",
|
"In this example we use the associated New York City energy demand dataset to showcase how you can use AutoML for a simple forecasting problem and explore the results. The goal is predict the energy demand for the next 48 hours based on historic time-series data.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) first, if you haven't already, to establish your connection to the AzureML Workspace.\n",
|
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) first, if you haven't already, to establish your connection to the AzureML Workspace.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"In this notebook you will learn how to:\n",
|
"In this notebook you will learn how to:\n",
|
||||||
"1. Creating an Experiment using an existing Workspace\n",
|
"1. Creating an Experiment using an existing Workspace\n",
|
||||||
"1. Configure AutoML using 'AutoMLConfig'\n",
|
"1. Configure AutoML using 'AutoMLConfig'\n",
|
||||||
"1. Train the model using AmlCompute\n",
|
"1. Train the model using AmlCompute\n",
|
||||||
"1. Explore the engineered features and results\n",
|
"1. Explore the engineered features and results\n",
|
||||||
|
"1. Generate the forecast and compute the out-of-sample accuracy metrics\n",
|
||||||
"1. Configuration and remote run of AutoML for a time-series model with lag and rolling window features\n",
|
"1. Configuration and remote run of AutoML for a time-series model with lag and rolling window features\n",
|
||||||
"1. Run and explore the forecast"
|
"1. Run and explore the forecast with lagging features"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## Setup"
|
"# Setup<a id=\"setup\"></a>"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -66,6 +75,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
|
"import json\n",
|
||||||
"import logging\n",
|
"import logging\n",
|
||||||
"\n",
|
"\n",
|
||||||
"from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\n",
|
"from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\n",
|
||||||
@@ -88,7 +98,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -97,7 +107,6 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
|
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -117,7 +126,7 @@
|
|||||||
"ws = Workspace.from_config()\n",
|
"ws = Workspace.from_config()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# choose a name for the run history container in the workspace\n",
|
"# choose a name for the run history container in the workspace\n",
|
||||||
"experiment_name = 'automl-forecasting-energydemand'\n",
|
"experiment_name = \"automl-forecasting-energydemand\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# # project folder\n",
|
"# # project folder\n",
|
||||||
"# project_folder = './sample_projects/automl-forecasting-energy-demand'\n",
|
"# project_folder = './sample_projects/automl-forecasting-energy-demand'\n",
|
||||||
@@ -125,13 +134,14 @@
|
|||||||
"experiment = Experiment(ws, experiment_name)\n",
|
"experiment = Experiment(ws, experiment_name)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"output = {}\n",
|
"output = {}\n",
|
||||||
"output['Subscription ID'] = ws.subscription_id\n",
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
"output['Workspace'] = ws.name\n",
|
"output[\"Workspace\"] = ws.name\n",
|
||||||
"output['Resource Group'] = ws.resource_group\n",
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
"output['Location'] = ws.location\n",
|
"output[\"Location\"] = ws.location\n",
|
||||||
"output['Run History Name'] = experiment_name\n",
|
"output[\"Run History Name\"] = experiment_name\n",
|
||||||
"pd.set_option('display.max_colwidth', -1)\n",
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
"outputDf.T"
|
"outputDf.T"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -164,10 +174,11 @@
|
|||||||
"# Verify that cluster does not exist already\n",
|
"# Verify that cluster does not exist already\n",
|
||||||
"try:\n",
|
"try:\n",
|
||||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||||
" print('Found existing cluster, use it.')\n",
|
" print(\"Found existing cluster, use it.\")\n",
|
||||||
"except ComputeTargetException:\n",
|
"except ComputeTargetException:\n",
|
||||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||||
" max_nodes=6)\n",
|
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
|
||||||
|
" )\n",
|
||||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"compute_target.wait_for_completion(show_output=True)"
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
@@ -177,7 +188,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"# Data\n",
|
"# Data<a id=\"data\"></a>\n",
|
||||||
"\n",
|
"\n",
|
||||||
"We will use energy consumption [data from New York City](http://mis.nyiso.com/public/P-58Blist.htm) for model training. The data is stored in a tabular format and includes energy demand and basic weather data at an hourly frequency. \n",
|
"We will use energy consumption [data from New York City](http://mis.nyiso.com/public/P-58Blist.htm) for model training. The data is stored in a tabular format and includes energy demand and basic weather data at an hourly frequency. \n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -202,8 +213,8 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"target_column_name = 'demand'\n",
|
"target_column_name = \"demand\"\n",
|
||||||
"time_column_name = 'timeStamp'"
|
"time_column_name = \"timeStamp\""
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -212,7 +223,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"dataset = Dataset.Tabular.from_delimited_files(path = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\").with_timestamp_columns(fine_grain_timestamp=time_column_name) \n",
|
"dataset = Dataset.Tabular.from_delimited_files(\n",
|
||||||
|
" path=\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\"\n",
|
||||||
|
").with_timestamp_columns(fine_grain_timestamp=time_column_name)\n",
|
||||||
"dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
|
"dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -254,8 +267,12 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# split into train based on time\n",
|
"# split into train based on time\n",
|
||||||
"train = dataset.time_before(datetime(2017, 8, 8, 5), include_boundary=True)\n",
|
"train = (\n",
|
||||||
"train.to_pandas_dataframe().reset_index(drop=True).sort_values(time_column_name).tail(5)"
|
" dataset.time_before(datetime(2017, 8, 8, 5), include_boundary=True)\n",
|
||||||
|
" .to_pandas_dataframe()\n",
|
||||||
|
" .reset_index(drop=True)\n",
|
||||||
|
")\n",
|
||||||
|
"train.sort_values(time_column_name).tail(5)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -265,8 +282,39 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# split into test based on time\n",
|
"# split into test based on time\n",
|
||||||
"test = dataset.time_between(datetime(2017, 8, 8, 6), datetime(2017, 8, 10, 5))\n",
|
"test = (\n",
|
||||||
"test.to_pandas_dataframe().reset_index(drop=True).head(5)"
|
" dataset.time_between(datetime(2017, 8, 8, 6), datetime(2017, 8, 10, 5))\n",
|
||||||
|
" .to_pandas_dataframe()\n",
|
||||||
|
" .reset_index(drop=True)\n",
|
||||||
|
")\n",
|
||||||
|
"test.head(5)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"jupyter": {
|
||||||
|
"outputs_hidden": false
|
||||||
|
},
|
||||||
|
"nteract": {
|
||||||
|
"transient": {
|
||||||
|
"deleting": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# register the splitted train and test data in workspace storage\n",
|
||||||
|
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||||
|
"\n",
|
||||||
|
"datastore = ws.get_default_datastore()\n",
|
||||||
|
"train_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||||
|
" train, target=(datastore, \"dataset/\"), name=\"nyc_energy_train\"\n",
|
||||||
|
")\n",
|
||||||
|
"test_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||||
|
" test, target=(datastore, \"dataset/\"), name=\"nyc_energy_test\"\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -302,14 +350,15 @@
|
|||||||
"|-|-|\n",
|
"|-|-|\n",
|
||||||
"|**time_column_name**|The name of your time column.|\n",
|
"|**time_column_name**|The name of your time column.|\n",
|
||||||
"|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
|
"|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
|
||||||
"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information."
|
"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.\n",
|
||||||
|
"|**cv_step_size**|Number of periods between two consecutive cross-validation folds. The default value is \"auto\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## Train\n",
|
"# Train<a id=\"train\"></a>\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Instantiate an AutoMLConfig object. This config defines the settings and data used to run the experiment. We can provide extra configurations within 'automl_settings', for this forecasting task we add the forecasting parameters to hold all the additional forecasting parameters.\n",
|
"Instantiate an AutoMLConfig object. This config defines the settings and data used to run the experiment. We can provide extra configurations within 'automl_settings', for this forecasting task we add the forecasting parameters to hold all the additional forecasting parameters.\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -322,7 +371,7 @@
|
|||||||
"|**training_data**|The training data to be used within the experiment.|\n",
|
"|**training_data**|The training data to be used within the experiment.|\n",
|
||||||
"|**label_column_name**|The name of the label column.|\n",
|
"|**label_column_name**|The name of the label column.|\n",
|
||||||
"|**compute_target**|The remote compute for training.|\n",
|
"|**compute_target**|The remote compute for training.|\n",
|
||||||
"|**n_cross_validations**|Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way.|\n",
|
"|**n_cross_validations**|Number of cross-validation folds to use for model/pipeline selection. The default value is \"auto\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value.\n",
|
||||||
"|**enable_early_stopping**|Flag to enble early termination if the score is not improving in the short term.|\n",
|
"|**enable_early_stopping**|Flag to enble early termination if the score is not improving in the short term.|\n",
|
||||||
"|**forecasting_parameters**|A class holds all the forecasting related parameters.|\n"
|
"|**forecasting_parameters**|A class holds all the forecasting related parameters.|\n"
|
||||||
]
|
]
|
||||||
@@ -341,21 +390,27 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||||
|
"\n",
|
||||||
"forecasting_parameters = ForecastingParameters(\n",
|
"forecasting_parameters = ForecastingParameters(\n",
|
||||||
" time_column_name=time_column_name, forecast_horizon=forecast_horizon\n",
|
" time_column_name=time_column_name,\n",
|
||||||
|
" forecast_horizon=forecast_horizon,\n",
|
||||||
|
" freq=\"H\", # Set the forecast frequency to be hourly\n",
|
||||||
|
" cv_step_size=\"auto\",\n",
|
||||||
")\n",
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"automl_config = AutoMLConfig(task='forecasting', \n",
|
"automl_config = AutoMLConfig(\n",
|
||||||
" primary_metric='normalized_root_mean_squared_error',\n",
|
" task=\"forecasting\",\n",
|
||||||
" blocked_models = ['ExtremeRandomTrees', 'AutoArima', 'Prophet'], \n",
|
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||||
" experiment_timeout_hours=0.3,\n",
|
" blocked_models=[\"ExtremeRandomTrees\", \"AutoArima\", \"Prophet\"],\n",
|
||||||
" training_data=train,\n",
|
" experiment_timeout_hours=0.3,\n",
|
||||||
" label_column_name=target_column_name,\n",
|
" training_data=train_dataset,\n",
|
||||||
" compute_target=compute_target,\n",
|
" label_column_name=target_column_name,\n",
|
||||||
" enable_early_stopping=True,\n",
|
" compute_target=compute_target,\n",
|
||||||
" n_cross_validations=3, \n",
|
" enable_early_stopping=True,\n",
|
||||||
" verbosity=logging.INFO,\n",
|
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||||
" forecasting_parameters=forecasting_parameters)"
|
" verbosity=logging.INFO,\n",
|
||||||
|
" forecasting_parameters=forecasting_parameters,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -375,15 +430,6 @@
|
|||||||
"remote_run = experiment.submit(automl_config, show_output=False)"
|
"remote_run = experiment.submit(automl_config, show_output=False)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"remote_run"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
@@ -397,8 +443,8 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## Retrieve the Best Model\n",
|
"## Retrieve the Best Run details\n",
|
||||||
"Below we select the best model from all the training iterations using get_output method."
|
"Below we retrieve the best Run object from among all the runs in the experiment."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -407,8 +453,8 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"best_run, fitted_model = remote_run.get_output()\n",
|
"best_run = remote_run.get_best_child()\n",
|
||||||
"fitted_model.steps"
|
"best_run"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -416,7 +462,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## Featurization\n",
|
"## Featurization\n",
|
||||||
"You can access the engineered feature names generated in time-series featurization."
|
"We can look at the engineered feature names generated in time-series featurization via. the JSON file named 'engineered_feature_names.json' under the run outputs."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -425,7 +471,14 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()"
|
"# Download the JSON file locally\n",
|
||||||
|
"best_run.download_file(\n",
|
||||||
|
" \"outputs/engineered_feature_names.json\", \"engineered_feature_names.json\"\n",
|
||||||
|
")\n",
|
||||||
|
"with open(\"engineered_feature_names.json\", \"r\") as f:\n",
|
||||||
|
" records = json.load(f)\n",
|
||||||
|
"\n",
|
||||||
|
"records"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -448,37 +501,37 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Get the featurization summary as a list of JSON\n",
|
"# Download the featurization summary JSON file locally\n",
|
||||||
"featurization_summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()\n",
|
"best_run.download_file(\n",
|
||||||
"# View the featurization summary as a pandas dataframe\n",
|
" \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
|
||||||
"pd.DataFrame.from_records(featurization_summary)"
|
")\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Forecasting\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"Now that we have retrieved the best pipeline/model, it can be used to make predictions on test data. First, we remove the target values from the test set:"
|
"# Render the JSON as a pandas DataFrame\n",
|
||||||
]
|
"with open(\"featurization_summary.json\", \"r\") as f:\n",
|
||||||
},
|
" records = json.load(f)\n",
|
||||||
{
|
"fs = pd.DataFrame.from_records(records)\n",
|
||||||
"cell_type": "code",
|
"\n",
|
||||||
"execution_count": null,
|
"# View a summary of the featurization\n",
|
||||||
"metadata": {},
|
"fs[\n",
|
||||||
"outputs": [],
|
" [\n",
|
||||||
"source": [
|
" \"RawFeatureName\",\n",
|
||||||
"X_test = test.to_pandas_dataframe().reset_index(drop=True)\n",
|
" \"TypeDetected\",\n",
|
||||||
"y_test = X_test.pop(target_column_name).values"
|
" \"Dropped\",\n",
|
||||||
|
" \"EngineeredFeatureCount\",\n",
|
||||||
|
" \"Transformations\",\n",
|
||||||
|
" ]\n",
|
||||||
|
"]"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"### Forecast Function\n",
|
"# Forecasting<a id=\"forecast\"></a>\n",
|
||||||
"For forecasting, we will use the forecast function instead of the predict function. Using the predict method would result in getting predictions for EVERY horizon the forecaster can predict at. This is useful when training and evaluating the performance of the forecaster at various horizons, but the level of detail is excessive for normal use. Forecast function also can handle more complicated scenarios, see the [forecast function notebook](../forecasting-forecast-function/auto-ml-forecasting-function.ipynb)."
|
"\n",
|
||||||
|
"Now that we have retrieved the best pipeline/model, it can be used to make predictions on test data. We will do batch scoring on the test dataset which should have the same schema as training dataset.\n",
|
||||||
|
"\n",
|
||||||
|
"The inference will run on a remote compute. In this example, it will re-use the training compute."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -487,10 +540,36 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# The featurized data, aligned to y, will also be returned.\n",
|
"test_experiment = Experiment(ws, experiment_name + \"_inference\")"
|
||||||
"# This contains the assumptions that were made in the forecast\n",
|
]
|
||||||
"# and helps align the forecast to the original data\n",
|
},
|
||||||
"y_predictions, X_trans = fitted_model.forecast(X_test)"
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Retrieving forecasts from the model\n",
|
||||||
|
"We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from run_forecast import run_remote_inference\n",
|
||||||
|
"\n",
|
||||||
|
"remote_run_infer = run_remote_inference(\n",
|
||||||
|
" test_experiment=test_experiment,\n",
|
||||||
|
" compute_target=compute_target,\n",
|
||||||
|
" train_run=best_run,\n",
|
||||||
|
" test_dataset=test_dataset,\n",
|
||||||
|
" target_column_name=target_column_name,\n",
|
||||||
|
")\n",
|
||||||
|
"remote_run_infer.wait_for_completion(show_output=False)\n",
|
||||||
|
"\n",
|
||||||
|
"# download the inference output file to the local machine\n",
|
||||||
|
"remote_run_infer.download_file(\"outputs/predictions.csv\", \"predictions.csv\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -498,9 +577,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"### Evaluate\n",
|
"### Evaluate\n",
|
||||||
"To evaluate the accuracy of the forecast, we'll compare against the actual sales quantities for some select metrics, included the mean absolute percentage error (MAPE). For more metrics that can be used for evaluation after training, please see [supported metrics](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#regressionforecasting-metrics), and [how to calculate residuals](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#residuals).\n",
|
"To evaluate the accuracy of the forecast, we'll compare against the actual sales quantities for some select metrics, included the mean absolute percentage error (MAPE). For more metrics that can be used for evaluation after training, please see [supported metrics](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#regressionforecasting-metrics), and [how to calculate residuals](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#residuals)."
|
||||||
"\n",
|
|
||||||
"It is a good practice to always align the output explicitly to the input, as the count and order of the rows may have changed during transformations that span multiple rows."
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -509,9 +586,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from forecasting_helper import align_outputs\n",
|
"# load forecast data frame\n",
|
||||||
"\n",
|
"fcst_df = pd.read_csv(\"predictions.csv\", parse_dates=[time_column_name])\n",
|
||||||
"df_all = align_outputs(y_predictions, X_trans, X_test, y_test, target_column_name)"
|
"fcst_df.head()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -526,19 +603,24 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"# use automl metrics module\n",
|
"# use automl metrics module\n",
|
||||||
"scores = scoring.score_regression(\n",
|
"scores = scoring.score_regression(\n",
|
||||||
" y_test=df_all[target_column_name],\n",
|
" y_test=fcst_df[target_column_name],\n",
|
||||||
" y_pred=df_all['predicted'],\n",
|
" y_pred=fcst_df[\"predicted\"],\n",
|
||||||
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
|
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print(\"[Test data scores]\\n\")\n",
|
"print(\"[Test data scores]\\n\")\n",
|
||||||
"for key, value in scores.items(): \n",
|
"for key, value in scores.items():\n",
|
||||||
" print('{}: {:.3f}'.format(key, value))\n",
|
" print(\"{}: {:.3f}\".format(key, value))\n",
|
||||||
" \n",
|
"\n",
|
||||||
"# Plot outputs\n",
|
"# Plot outputs\n",
|
||||||
"%matplotlib inline\n",
|
"%matplotlib inline\n",
|
||||||
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
|
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df[\"predicted\"], color=\"b\")\n",
|
||||||
"test_test = plt.scatter(df_all[target_column_name], df_all[target_column_name], color='g')\n",
|
"test_test = plt.scatter(\n",
|
||||||
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
|
" fcst_df[target_column_name], fcst_df[target_column_name], color=\"g\"\n",
|
||||||
|
")\n",
|
||||||
|
"plt.legend(\n",
|
||||||
|
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
|
||||||
|
")\n",
|
||||||
"plt.show()"
|
"plt.show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -546,23 +628,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"Looking at `X_trans` is also useful to see what featurization happened to the data."
|
"# Advanced Training <a id=\"advanced_training\"></a>\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"X_trans"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Advanced Training <a id=\"advanced_training\"></a>\n",
|
|
||||||
"We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, time series identifier columns and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation."
|
"We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, time series identifier columns and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -583,21 +649,34 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"advanced_forecasting_parameters = ForecastingParameters(\n",
|
"advanced_forecasting_parameters = ForecastingParameters(\n",
|
||||||
" time_column_name=time_column_name, forecast_horizon=forecast_horizon,\n",
|
" time_column_name=time_column_name,\n",
|
||||||
" target_lags=12, target_rolling_window_size=4\n",
|
" forecast_horizon=forecast_horizon,\n",
|
||||||
|
" target_lags=12,\n",
|
||||||
|
" target_rolling_window_size=4,\n",
|
||||||
|
" cv_step_size=\"auto\",\n",
|
||||||
")\n",
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"automl_config = AutoMLConfig(task='forecasting', \n",
|
"automl_config = AutoMLConfig(\n",
|
||||||
" primary_metric='normalized_root_mean_squared_error',\n",
|
" task=\"forecasting\",\n",
|
||||||
" blocked_models = ['ElasticNet','ExtremeRandomTrees','GradientBoosting','XGBoostRegressor','ExtremeRandomTrees', 'AutoArima', 'Prophet'], #These models are blocked for tutorial purposes, remove this for real use cases. \n",
|
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||||
" experiment_timeout_hours=0.3,\n",
|
" blocked_models=[\n",
|
||||||
" training_data=train,\n",
|
" \"ElasticNet\",\n",
|
||||||
" label_column_name=target_column_name,\n",
|
" \"ExtremeRandomTrees\",\n",
|
||||||
" compute_target=compute_target,\n",
|
" \"GradientBoosting\",\n",
|
||||||
" enable_early_stopping = True,\n",
|
" \"XGBoostRegressor\",\n",
|
||||||
" n_cross_validations=3, \n",
|
" \"ExtremeRandomTrees\",\n",
|
||||||
" verbosity=logging.INFO,\n",
|
" \"AutoArima\",\n",
|
||||||
" forecasting_parameters=advanced_forecasting_parameters)"
|
" \"Prophet\",\n",
|
||||||
|
" ], # These models are blocked for tutorial purposes, remove this for real use cases.\n",
|
||||||
|
" experiment_timeout_hours=0.3,\n",
|
||||||
|
" training_data=train_dataset,\n",
|
||||||
|
" label_column_name=target_column_name,\n",
|
||||||
|
" compute_target=compute_target,\n",
|
||||||
|
" enable_early_stopping=True,\n",
|
||||||
|
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||||
|
" verbosity=logging.INFO,\n",
|
||||||
|
" forecasting_parameters=advanced_forecasting_parameters,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -629,7 +708,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"### Retrieve the Best Model"
|
"### Retrieve the Best Run details"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -638,14 +717,15 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"best_run_lags, fitted_model_lags = advanced_remote_run.get_output()"
|
"best_run_lags = remote_run.get_best_child()\n",
|
||||||
|
"best_run_lags"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## Advanced Results<a id=\"advanced_results\"></a>\n",
|
"# Advanced Results<a id=\"advanced_results\"></a>\n",
|
||||||
"We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, time series identifier columns and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation."
|
"We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, time series identifier columns and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -655,10 +735,21 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# The featurized data, aligned to y, will also be returned.\n",
|
"test_experiment_advanced = Experiment(ws, experiment_name + \"_inference_advanced\")\n",
|
||||||
"# This contains the assumptions that were made in the forecast\n",
|
"advanced_remote_run_infer = run_remote_inference(\n",
|
||||||
"# and helps align the forecast to the original data\n",
|
" test_experiment=test_experiment_advanced,\n",
|
||||||
"y_predictions, X_trans = fitted_model_lags.forecast(X_test)"
|
" compute_target=compute_target,\n",
|
||||||
|
" train_run=best_run_lags,\n",
|
||||||
|
" test_dataset=test_dataset,\n",
|
||||||
|
" target_column_name=target_column_name,\n",
|
||||||
|
" inference_folder=\"./forecast_advanced\",\n",
|
||||||
|
")\n",
|
||||||
|
"advanced_remote_run_infer.wait_for_completion(show_output=False)\n",
|
||||||
|
"\n",
|
||||||
|
"# download the inference output file to the local machine\n",
|
||||||
|
"advanced_remote_run_infer.download_file(\n",
|
||||||
|
" \"outputs/predictions.csv\", \"predictions_advanced.csv\"\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -667,9 +758,8 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from forecasting_helper import align_outputs\n",
|
"fcst_adv_df = pd.read_csv(\"predictions_advanced.csv\", parse_dates=[time_column_name])\n",
|
||||||
"\n",
|
"fcst_adv_df.head()"
|
||||||
"df_all = align_outputs(y_predictions, X_trans, X_test, y_test, target_column_name)"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -684,19 +774,26 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"# use automl metrics module\n",
|
"# use automl metrics module\n",
|
||||||
"scores = scoring.score_regression(\n",
|
"scores = scoring.score_regression(\n",
|
||||||
" y_test=df_all[target_column_name],\n",
|
" y_test=fcst_adv_df[target_column_name],\n",
|
||||||
" y_pred=df_all['predicted'],\n",
|
" y_pred=fcst_adv_df[\"predicted\"],\n",
|
||||||
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
|
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print(\"[Test data scores]\\n\")\n",
|
"print(\"[Test data scores]\\n\")\n",
|
||||||
"for key, value in scores.items(): \n",
|
"for key, value in scores.items():\n",
|
||||||
" print('{}: {:.3f}'.format(key, value))\n",
|
" print(\"{}: {:.3f}\".format(key, value))\n",
|
||||||
" \n",
|
"\n",
|
||||||
"# Plot outputs\n",
|
"# Plot outputs\n",
|
||||||
"%matplotlib inline\n",
|
"%matplotlib inline\n",
|
||||||
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
|
"test_pred = plt.scatter(\n",
|
||||||
"test_test = plt.scatter(df_all[target_column_name], df_all[target_column_name], color='g')\n",
|
" fcst_adv_df[target_column_name], fcst_adv_df[\"predicted\"], color=\"b\"\n",
|
||||||
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
|
")\n",
|
||||||
|
"test_test = plt.scatter(\n",
|
||||||
|
" fcst_adv_df[target_column_name], fcst_adv_df[target_column_name], color=\"g\"\n",
|
||||||
|
")\n",
|
||||||
|
"plt.legend(\n",
|
||||||
|
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
|
||||||
|
")\n",
|
||||||
"plt.show()"
|
"plt.show()"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@@ -711,10 +808,13 @@
|
|||||||
"how-to-use-azureml",
|
"how-to-use-azureml",
|
||||||
"automated-machine-learning"
|
"automated-machine-learning"
|
||||||
],
|
],
|
||||||
|
"kernel_info": {
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3.6",
|
"display_name": "Python 3.8 - AzureML",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python36"
|
"name": "python38-azureml"
|
||||||
},
|
},
|
||||||
"language_info": {
|
"language_info": {
|
||||||
"codemirror_mode": {
|
"codemirror_mode": {
|
||||||
@@ -726,9 +826,22 @@
|
|||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.6.8"
|
"version": "3.8.10"
|
||||||
|
},
|
||||||
|
"microsoft": {
|
||||||
|
"ms_spell_check": {
|
||||||
|
"ms_spell_check_language": "en"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nteract": {
|
||||||
|
"version": "nteract-front-end@1.0.0"
|
||||||
|
},
|
||||||
|
"vscode": {
|
||||||
|
"interpreter": {
|
||||||
|
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
"nbformat_minor": 2
|
"nbformat_minor": 4
|
||||||
}
|
}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: auto-ml-forecasting-energy-demand
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -1,44 +0,0 @@
|
|||||||
import pandas as pd
|
|
||||||
import numpy as np
|
|
||||||
from pandas.tseries.frequencies import to_offset
|
|
||||||
|
|
||||||
|
|
||||||
def align_outputs(y_predicted, X_trans, X_test, y_test, target_column_name,
|
|
||||||
predicted_column_name='predicted',
|
|
||||||
horizon_colname='horizon_origin'):
|
|
||||||
"""
|
|
||||||
Demonstrates how to get the output aligned to the inputs
|
|
||||||
using pandas indexes. Helps understand what happened if
|
|
||||||
the output's shape differs from the input shape, or if
|
|
||||||
the data got re-sorted by time and grain during forecasting.
|
|
||||||
|
|
||||||
Typical causes of misalignment are:
|
|
||||||
* we predicted some periods that were missing in actuals -> drop from eval
|
|
||||||
* model was asked to predict past max_horizon -> increase max horizon
|
|
||||||
* data at start of X_test was needed for lags -> provide previous periods
|
|
||||||
"""
|
|
||||||
|
|
||||||
if (horizon_colname in X_trans):
|
|
||||||
df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
|
|
||||||
horizon_colname: X_trans[horizon_colname]})
|
|
||||||
else:
|
|
||||||
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
|
|
||||||
|
|
||||||
# y and X outputs are aligned by forecast() function contract
|
|
||||||
df_fcst.index = X_trans.index
|
|
||||||
|
|
||||||
# align original X_test to y_test
|
|
||||||
X_test_full = X_test.copy()
|
|
||||||
X_test_full[target_column_name] = y_test
|
|
||||||
|
|
||||||
# X_test_full's index does not include origin, so reset for merge
|
|
||||||
df_fcst.reset_index(inplace=True)
|
|
||||||
X_test_full = X_test_full.reset_index().drop(columns='index')
|
|
||||||
together = df_fcst.merge(X_test_full, how='right')
|
|
||||||
|
|
||||||
# drop rows where prediction or actuals are nan
|
|
||||||
# happens because of missing actuals
|
|
||||||
# or at edges of time due to lags/rolling windows
|
|
||||||
clean = together[together[[target_column_name,
|
|
||||||
predicted_column_name]].notnull().all(axis=1)]
|
|
||||||
return(clean)
|
|
||||||
@@ -0,0 +1,61 @@
|
|||||||
|
"""
|
||||||
|
This is the script that is executed on the compute instance. It relies
|
||||||
|
on the model.pkl file which is uploaded along with this script to the
|
||||||
|
compute instance.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
from azureml.core import Dataset, Run
|
||||||
|
import joblib
|
||||||
|
from pandas.tseries.frequencies import to_offset
|
||||||
|
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
parser.add_argument(
|
||||||
|
"--target_column_name",
|
||||||
|
type=str,
|
||||||
|
dest="target_column_name",
|
||||||
|
help="Target Column Name",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
|
||||||
|
)
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
target_column_name = args.target_column_name
|
||||||
|
test_dataset_id = args.test_dataset
|
||||||
|
|
||||||
|
run = Run.get_context()
|
||||||
|
ws = run.experiment.workspace
|
||||||
|
|
||||||
|
# get the input dataset by id
|
||||||
|
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
|
||||||
|
|
||||||
|
X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
|
||||||
|
y_test = X_test.pop(target_column_name).values
|
||||||
|
|
||||||
|
# generate forecast
|
||||||
|
fitted_model = joblib.load("model.pkl")
|
||||||
|
# We have default quantiles values set as below(95th percentile)
|
||||||
|
quantiles = [0.025, 0.5, 0.975]
|
||||||
|
predicted_column_name = "predicted"
|
||||||
|
PI = "prediction_interval"
|
||||||
|
fitted_model.quantiles = quantiles
|
||||||
|
pred_quantiles = fitted_model.forecast_quantiles(X_test)
|
||||||
|
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
|
||||||
|
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
|
||||||
|
)
|
||||||
|
X_test[target_column_name] = y_test
|
||||||
|
X_test[PI] = pred_quantiles[PI]
|
||||||
|
X_test[predicted_column_name] = pred_quantiles[0.5]
|
||||||
|
# drop rows where prediction or actuals are nan
|
||||||
|
# happens because of missing actuals
|
||||||
|
# or at edges of time due to lags/rolling windows
|
||||||
|
clean = X_test[
|
||||||
|
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
|
||||||
|
]
|
||||||
|
|
||||||
|
file_name = "outputs/predictions.csv"
|
||||||
|
export_csv = clean.to_csv(file_name, header=True, index=False) # added Index
|
||||||
|
|
||||||
|
# Upload the predictions into artifacts
|
||||||
|
run.upload_file(name=file_name, path_or_stream=file_name)
|
||||||
@@ -1,22 +0,0 @@
|
|||||||
import pandas as pd
|
|
||||||
import numpy as np
|
|
||||||
|
|
||||||
|
|
||||||
def APE(actual, pred):
|
|
||||||
"""
|
|
||||||
Calculate absolute percentage error.
|
|
||||||
Returns a vector of APE values with same length as actual/pred.
|
|
||||||
"""
|
|
||||||
return 100 * np.abs((actual - pred) / actual)
|
|
||||||
|
|
||||||
|
|
||||||
def MAPE(actual, pred):
|
|
||||||
"""
|
|
||||||
Calculate mean absolute percentage error.
|
|
||||||
Remove NA and values where actual is close to zero
|
|
||||||
"""
|
|
||||||
not_na = ~(np.isnan(actual) | np.isnan(pred))
|
|
||||||
not_zero = ~np.isclose(actual, 0.0)
|
|
||||||
actual_safe = actual[not_na & not_zero]
|
|
||||||
pred_safe = pred[not_na & not_zero]
|
|
||||||
return np.mean(APE(actual_safe, pred_safe))
|
|
||||||
@@ -0,0 +1,49 @@
|
|||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
from azureml.core import ScriptRunConfig
|
||||||
|
|
||||||
|
|
||||||
|
def run_remote_inference(
|
||||||
|
test_experiment,
|
||||||
|
compute_target,
|
||||||
|
train_run,
|
||||||
|
test_dataset,
|
||||||
|
target_column_name,
|
||||||
|
inference_folder="./forecast",
|
||||||
|
):
|
||||||
|
# Create local directory to copy the model.pkl and forecsting_script.py files into.
|
||||||
|
# These files will be uploaded to and executed on the compute instance.
|
||||||
|
os.makedirs(inference_folder, exist_ok=True)
|
||||||
|
shutil.copy("forecasting_script.py", inference_folder)
|
||||||
|
|
||||||
|
train_run.download_file(
|
||||||
|
"outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
|
||||||
|
)
|
||||||
|
|
||||||
|
inference_env = train_run.get_environment()
|
||||||
|
|
||||||
|
config = ScriptRunConfig(
|
||||||
|
source_directory=inference_folder,
|
||||||
|
script="forecasting_script.py",
|
||||||
|
arguments=[
|
||||||
|
"--target_column_name",
|
||||||
|
target_column_name,
|
||||||
|
"--test_dataset",
|
||||||
|
test_dataset.as_named_input(test_dataset.name),
|
||||||
|
],
|
||||||
|
compute_target=compute_target,
|
||||||
|
environment=inference_env,
|
||||||
|
)
|
||||||
|
|
||||||
|
run = test_experiment.submit(
|
||||||
|
config,
|
||||||
|
tags={
|
||||||
|
"training_run_id": train_run.id,
|
||||||
|
"run_algorithm": train_run.properties["run_algorithm"],
|
||||||
|
"valid_score": train_run.properties["score"],
|
||||||
|
"primary_metric": train_run.properties["primary_metric"],
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
run.log("run_algorithm", run.tags["run_algorithm"])
|
||||||
|
return run
|
||||||
@@ -52,7 +52,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"Please make sure you have followed the `configuration.ipynb` notebook so that your ML workspace information is saved in the config file."
|
"Please make sure you have followed the [configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) so that your ML workspace information is saved in the config file."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -85,7 +85,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -94,7 +94,6 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
|
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -111,19 +110,20 @@
|
|||||||
"ws = Workspace.from_config()\n",
|
"ws = Workspace.from_config()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# choose a name for the run history container in the workspace\n",
|
"# choose a name for the run history container in the workspace\n",
|
||||||
"experiment_name = 'automl-forecast-function-demo'\n",
|
"experiment_name = \"automl-forecast-function-demo\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"experiment = Experiment(ws, experiment_name)\n",
|
"experiment = Experiment(ws, experiment_name)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"output = {}\n",
|
"output = {}\n",
|
||||||
"output['Subscription ID'] = ws.subscription_id\n",
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
"output['Workspace'] = ws.name\n",
|
"output[\"Workspace\"] = ws.name\n",
|
||||||
"output['SKU'] = ws.sku\n",
|
"output[\"SKU\"] = ws.sku\n",
|
||||||
"output['Resource Group'] = ws.resource_group\n",
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
"output['Location'] = ws.location\n",
|
"output[\"Location\"] = ws.location\n",
|
||||||
"output['Run History Name'] = experiment_name\n",
|
"output[\"Run History Name\"] = experiment_name\n",
|
||||||
"pd.set_option('display.max_colwidth', -1)\n",
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
"outputDf.T"
|
"outputDf.T"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -141,17 +141,20 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"TIME_COLUMN_NAME = 'date'\n",
|
"TIME_COLUMN_NAME = \"date\"\n",
|
||||||
"TIME_SERIES_ID_COLUMN_NAME = 'time_series_id'\n",
|
"TIME_SERIES_ID_COLUMN_NAME = \"time_series_id\"\n",
|
||||||
"TARGET_COLUMN_NAME = 'y'\n",
|
"TARGET_COLUMN_NAME = \"y\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"def get_timeseries(train_len: int,\n",
|
"\n",
|
||||||
" test_len: int,\n",
|
"def get_timeseries(\n",
|
||||||
" time_column_name: str,\n",
|
" train_len: int,\n",
|
||||||
" target_column_name: str,\n",
|
" test_len: int,\n",
|
||||||
" time_series_id_column_name: str,\n",
|
" time_column_name: str,\n",
|
||||||
" time_series_number: int = 1,\n",
|
" target_column_name: str,\n",
|
||||||
" freq: str = 'H'):\n",
|
" time_series_id_column_name: str,\n",
|
||||||
|
" time_series_number: int = 1,\n",
|
||||||
|
" freq: str = \"H\",\n",
|
||||||
|
"):\n",
|
||||||
" \"\"\"\n",
|
" \"\"\"\n",
|
||||||
" Return the time series of designed length.\n",
|
" Return the time series of designed length.\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -174,14 +177,18 @@
|
|||||||
" data_test = [] # type: List[pd.DataFrame]\n",
|
" data_test = [] # type: List[pd.DataFrame]\n",
|
||||||
" data_length = train_len + test_len\n",
|
" data_length = train_len + test_len\n",
|
||||||
" for i in range(time_series_number):\n",
|
" for i in range(time_series_number):\n",
|
||||||
" X = pd.DataFrame({\n",
|
" X = pd.DataFrame(\n",
|
||||||
" time_column_name: pd.date_range(start='2000-01-01',\n",
|
" {\n",
|
||||||
" periods=data_length,\n",
|
" time_column_name: pd.date_range(\n",
|
||||||
" freq=freq),\n",
|
" start=\"2000-01-01\", periods=data_length, freq=freq\n",
|
||||||
" target_column_name: np.arange(data_length).astype(float) + np.random.rand(data_length) + i*5,\n",
|
" ),\n",
|
||||||
" 'ext_predictor': np.asarray(range(42, 42 + data_length)),\n",
|
" target_column_name: np.arange(data_length).astype(float)\n",
|
||||||
" time_series_id_column_name: np.repeat('ts{}'.format(i), data_length)\n",
|
" + np.random.rand(data_length)\n",
|
||||||
" })\n",
|
" + i * 5,\n",
|
||||||
|
" \"ext_predictor\": np.asarray(range(42, 42 + data_length)),\n",
|
||||||
|
" time_series_id_column_name: np.repeat(\"ts{}\".format(i), data_length),\n",
|
||||||
|
" }\n",
|
||||||
|
" )\n",
|
||||||
" data_train.append(X[:train_len])\n",
|
" data_train.append(X[:train_len])\n",
|
||||||
" data_test.append(X[train_len:])\n",
|
" data_test.append(X[train_len:])\n",
|
||||||
" X_train = pd.concat(data_train)\n",
|
" X_train = pd.concat(data_train)\n",
|
||||||
@@ -190,14 +197,17 @@
|
|||||||
" y_test = X_test.pop(target_column_name).values\n",
|
" y_test = X_test.pop(target_column_name).values\n",
|
||||||
" return X_train, y_train, X_test, y_test\n",
|
" return X_train, y_train, X_test, y_test\n",
|
||||||
"\n",
|
"\n",
|
||||||
|
"\n",
|
||||||
"n_test_periods = 6\n",
|
"n_test_periods = 6\n",
|
||||||
"n_train_periods = 30\n",
|
"n_train_periods = 30\n",
|
||||||
"X_train, y_train, X_test, y_test = get_timeseries(train_len=n_train_periods,\n",
|
"X_train, y_train, X_test, y_test = get_timeseries(\n",
|
||||||
" test_len=n_test_periods,\n",
|
" train_len=n_train_periods,\n",
|
||||||
" time_column_name=TIME_COLUMN_NAME,\n",
|
" test_len=n_test_periods,\n",
|
||||||
" target_column_name=TARGET_COLUMN_NAME,\n",
|
" time_column_name=TIME_COLUMN_NAME,\n",
|
||||||
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
|
" target_column_name=TARGET_COLUMN_NAME,\n",
|
||||||
" time_series_number=2)"
|
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
|
||||||
|
" time_series_number=2,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -224,11 +234,12 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"# plot the example time series\n",
|
"# plot the example time series\n",
|
||||||
"import matplotlib.pyplot as plt\n",
|
"import matplotlib.pyplot as plt\n",
|
||||||
|
"\n",
|
||||||
"whole_data = X_train.copy()\n",
|
"whole_data = X_train.copy()\n",
|
||||||
"target_label = 'y'\n",
|
"target_label = \"y\"\n",
|
||||||
"whole_data[target_label] = y_train\n",
|
"whole_data[target_label] = y_train\n",
|
||||||
"for g in whole_data.groupby('time_series_id'): \n",
|
"for g in whole_data.groupby(\"time_series_id\"):\n",
|
||||||
" plt.plot(g[1]['date'].values, g[1]['y'].values, label=g[0])\n",
|
" plt.plot(g[1][\"date\"].values, g[1][\"y\"].values, label=g[0])\n",
|
||||||
"plt.legend()\n",
|
"plt.legend()\n",
|
||||||
"plt.show()"
|
"plt.show()"
|
||||||
]
|
]
|
||||||
@@ -250,12 +261,12 @@
|
|||||||
"# We need to save thw artificial data and then upload them to default workspace datastore.\n",
|
"# We need to save thw artificial data and then upload them to default workspace datastore.\n",
|
||||||
"DATA_PATH = \"fc_fn_data\"\n",
|
"DATA_PATH = \"fc_fn_data\"\n",
|
||||||
"DATA_PATH_X = \"{}/data_train.csv\".format(DATA_PATH)\n",
|
"DATA_PATH_X = \"{}/data_train.csv\".format(DATA_PATH)\n",
|
||||||
"if not os.path.isdir('data'):\n",
|
"if not os.path.isdir(\"data\"):\n",
|
||||||
" os.mkdir('data')\n",
|
" os.mkdir(\"data\")\n",
|
||||||
"pd.DataFrame(whole_data).to_csv(\"data/data_train.csv\", index=False)\n",
|
"pd.DataFrame(whole_data).to_csv(\"data/data_train.csv\", index=False)\n",
|
||||||
"# Upload saved data to the default data store.\n",
|
"# Upload saved data to the default data store.\n",
|
||||||
"ds = ws.get_default_datastore()\n",
|
"ds = ws.get_default_datastore()\n",
|
||||||
"ds.upload(src_dir='./data', target_path=DATA_PATH, overwrite=True, show_progress=True)\n",
|
"ds.upload(src_dir=\"./data\", target_path=DATA_PATH, overwrite=True, show_progress=True)\n",
|
||||||
"train_data = Dataset.Tabular.from_delimited_files(path=ds.path(DATA_PATH_X))"
|
"train_data = Dataset.Tabular.from_delimited_files(path=ds.path(DATA_PATH_X))"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -263,7 +274,9 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource."
|
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -281,10 +294,11 @@
|
|||||||
"# Verify that cluster does not exist already\n",
|
"# Verify that cluster does not exist already\n",
|
||||||
"try:\n",
|
"try:\n",
|
||||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||||
" print('Found existing cluster, use it.')\n",
|
" print(\"Found existing cluster, use it.\")\n",
|
||||||
"except ComputeTargetException:\n",
|
"except ComputeTargetException:\n",
|
||||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
|
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||||
" max_nodes=6)\n",
|
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
|
||||||
|
" )\n",
|
||||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"compute_target.wait_for_completion(show_output=True)"
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
@@ -313,13 +327,16 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||||
"lags = [1,2,3]\n",
|
"\n",
|
||||||
|
"lags = [1, 2, 3]\n",
|
||||||
"forecast_horizon = n_test_periods\n",
|
"forecast_horizon = n_test_periods\n",
|
||||||
"forecasting_parameters = ForecastingParameters(\n",
|
"forecasting_parameters = ForecastingParameters(\n",
|
||||||
" time_column_name=TIME_COLUMN_NAME,\n",
|
" time_column_name=TIME_COLUMN_NAME,\n",
|
||||||
" forecast_horizon=forecast_horizon,\n",
|
" forecast_horizon=forecast_horizon,\n",
|
||||||
" time_series_id_column_names=[ TIME_SERIES_ID_COLUMN_NAME ],\n",
|
" time_series_id_column_names=[TIME_SERIES_ID_COLUMN_NAME],\n",
|
||||||
" target_lags=lags\n",
|
" target_lags=lags,\n",
|
||||||
|
" freq=\"H\", # Set the forecast frequency to be hourly,\n",
|
||||||
|
" cv_step_size=\"auto\",\n",
|
||||||
")"
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -341,19 +358,21 @@
|
|||||||
"from azureml.train.automl import AutoMLConfig\n",
|
"from azureml.train.automl import AutoMLConfig\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"automl_config = AutoMLConfig(task='forecasting',\n",
|
"automl_config = AutoMLConfig(\n",
|
||||||
" debug_log='automl_forecasting_function.log',\n",
|
" task=\"forecasting\",\n",
|
||||||
" primary_metric='normalized_root_mean_squared_error',\n",
|
" debug_log=\"automl_forecasting_function.log\",\n",
|
||||||
" experiment_timeout_hours=0.25,\n",
|
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||||
" enable_early_stopping=True,\n",
|
" experiment_timeout_hours=0.25,\n",
|
||||||
" training_data=train_data,\n",
|
" enable_early_stopping=True,\n",
|
||||||
" compute_target=compute_target,\n",
|
" training_data=train_data,\n",
|
||||||
" n_cross_validations=3,\n",
|
" compute_target=compute_target,\n",
|
||||||
" verbosity = logging.INFO,\n",
|
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||||
" max_concurrent_iterations=4,\n",
|
" verbosity=logging.INFO,\n",
|
||||||
" max_cores_per_iteration=-1,\n",
|
" max_concurrent_iterations=4,\n",
|
||||||
" label_column_name=target_label,\n",
|
" max_cores_per_iteration=-1,\n",
|
||||||
" forecasting_parameters=forecasting_parameters)\n",
|
" label_column_name=target_label,\n",
|
||||||
|
" forecasting_parameters=forecasting_parameters,\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"remote_run = experiment.submit(automl_config, show_output=False)"
|
"remote_run = experiment.submit(automl_config, show_output=False)"
|
||||||
]
|
]
|
||||||
@@ -430,7 +449,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"y_pred_no_gap, xy_nogap = fitted_model.forecast(X_test)\n",
|
"y_pred_no_gap, xy_nogap = fitted_model.forecast(X_test)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# xy_nogap contains the predictions in the _automl_target_col column.\n",
|
"# xy_nogap contains the predictions in the _automl_target_col column.\n",
|
||||||
"# Those same numbers are output in y_pred_no_gap\n",
|
"# Those same numbers are output in y_pred_no_gap\n",
|
||||||
@@ -458,7 +477,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"quantiles = fitted_model.forecast_quantiles(X_test)\n",
|
"quantiles = fitted_model.forecast_quantiles(X_test)\n",
|
||||||
"quantiles"
|
"quantiles"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -478,12 +497,12 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# specify which quantiles you would like \n",
|
"# specify which quantiles you would like\n",
|
||||||
"fitted_model.quantiles = [0.01, 0.5, 0.95]\n",
|
"fitted_model.quantiles = [0.01, 0.5, 0.95]\n",
|
||||||
"# use forecast_quantiles function, not the forecast() one\n",
|
"# use forecast_quantiles function, not the forecast() one\n",
|
||||||
"y_pred_quantiles = fitted_model.forecast_quantiles(X_test)\n",
|
"y_pred_quantiles = fitted_model.forecast_quantiles(X_test)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# quantile forecasts returned in a Dataframe along with the time and time series id columns \n",
|
"# quantile forecasts returned in a Dataframe along with the time and time series id columns\n",
|
||||||
"y_pred_quantiles"
|
"y_pred_quantiles"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -531,14 +550,16 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# generate the same kind of test data we trained on, \n",
|
"# generate the same kind of test data we trained on,\n",
|
||||||
"# but now make the train set much longer, so that the test set will be in the future\n",
|
"# but now make the train set much longer, so that the test set will be in the future\n",
|
||||||
"X_context, y_context, X_away, y_away = get_timeseries(train_len=42, # train data was 30 steps long\n",
|
"X_context, y_context, X_away, y_away = get_timeseries(\n",
|
||||||
" test_len=4,\n",
|
" train_len=42, # train data was 30 steps long\n",
|
||||||
" time_column_name=TIME_COLUMN_NAME,\n",
|
" test_len=4,\n",
|
||||||
" target_column_name=TARGET_COLUMN_NAME,\n",
|
" time_column_name=TIME_COLUMN_NAME,\n",
|
||||||
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
|
" target_column_name=TARGET_COLUMN_NAME,\n",
|
||||||
" time_series_number=2)\n",
|
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
|
||||||
|
" time_series_number=2,\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# end of the data we trained on\n",
|
"# end of the data we trained on\n",
|
||||||
"print(X_train.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())\n",
|
"print(X_train.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())\n",
|
||||||
@@ -559,7 +580,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"try: \n",
|
"try:\n",
|
||||||
" y_pred_away, xy_away = fitted_model.forecast(X_away)\n",
|
" y_pred_away, xy_away = fitted_model.forecast(X_away)\n",
|
||||||
" xy_away\n",
|
" xy_away\n",
|
||||||
"except Exception as e:\n",
|
"except Exception as e:\n",
|
||||||
@@ -581,7 +602,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"def make_forecasting_query(fulldata, time_column_name, target_column_name, forecast_origin, horizon, lookback):\n",
|
"def make_forecasting_query(\n",
|
||||||
|
" fulldata, time_column_name, target_column_name, forecast_origin, horizon, lookback\n",
|
||||||
|
"):\n",
|
||||||
"\n",
|
"\n",
|
||||||
" \"\"\"\n",
|
" \"\"\"\n",
|
||||||
" This function will take the full dataset, and create the query\n",
|
" This function will take the full dataset, and create the query\n",
|
||||||
@@ -589,24 +612,24 @@
|
|||||||
" forward for the next `horizon` horizons. Context from previous\n",
|
" forward for the next `horizon` horizons. Context from previous\n",
|
||||||
" `lookback` periods will be included.\n",
|
" `lookback` periods will be included.\n",
|
||||||
"\n",
|
"\n",
|
||||||
" \n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
" fulldata: pandas.DataFrame a time series dataset. Needs to contain X and y.\n",
|
" fulldata: pandas.DataFrame a time series dataset. Needs to contain X and y.\n",
|
||||||
" time_column_name: string which column (must be in fulldata) is the time axis\n",
|
" time_column_name: string which column (must be in fulldata) is the time axis\n",
|
||||||
" target_column_name: string which column (must be in fulldata) is to be forecast\n",
|
" target_column_name: string which column (must be in fulldata) is to be forecast\n",
|
||||||
" forecast_origin: datetime type the last time we (pretend to) have target values \n",
|
" forecast_origin: datetime type the last time we (pretend to) have target values\n",
|
||||||
" horizon: timedelta how far forward, in time units (not periods)\n",
|
" horizon: timedelta how far forward, in time units (not periods)\n",
|
||||||
" lookback: timedelta how far back does the model look?\n",
|
" lookback: timedelta how far back does the model look\n",
|
||||||
"\n",
|
"\n",
|
||||||
" Example:\n",
|
" Example:\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
" ```\n",
|
" ```\n",
|
||||||
"\n",
|
"\n",
|
||||||
" forecast_origin = pd.to_datetime('2012-09-01') + pd.DateOffset(days=5) # forecast 5 days after end of training\n",
|
" forecast_origin = pd.to_datetime(\"2012-09-01\") + pd.DateOffset(days=5) # forecast 5 days after end of training\n",
|
||||||
" print(forecast_origin)\n",
|
" print(forecast_origin)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" X_query, y_query = make_forecasting_query(data, \n",
|
" X_query, y_query = make_forecasting_query(data,\n",
|
||||||
" forecast_origin = forecast_origin,\n",
|
" forecast_origin = forecast_origin,\n",
|
||||||
" horizon = pd.DateOffset(days=7), # 7 days into the future\n",
|
" horizon = pd.DateOffset(days=7), # 7 days into the future\n",
|
||||||
" lookback = pd.DateOffset(days=1), # model has lag 1 period (day)\n",
|
" lookback = pd.DateOffset(days=1), # model has lag 1 period (day)\n",
|
||||||
@@ -615,28 +638,28 @@
|
|||||||
" ```\n",
|
" ```\n",
|
||||||
" \"\"\"\n",
|
" \"\"\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
" X_past = fulldata[ (fulldata[ time_column_name ] > forecast_origin - lookback) &\n",
|
" X_past = fulldata[\n",
|
||||||
" (fulldata[ time_column_name ] <= forecast_origin)\n",
|
" (fulldata[time_column_name] > forecast_origin - lookback)\n",
|
||||||
" ]\n",
|
" & (fulldata[time_column_name] <= forecast_origin)\n",
|
||||||
|
" ]\n",
|
||||||
"\n",
|
"\n",
|
||||||
" X_future = fulldata[ (fulldata[ time_column_name ] > forecast_origin) &\n",
|
" X_future = fulldata[\n",
|
||||||
" (fulldata[ time_column_name ] <= forecast_origin + horizon)\n",
|
" (fulldata[time_column_name] > forecast_origin)\n",
|
||||||
" ]\n",
|
" & (fulldata[time_column_name] <= forecast_origin + horizon)\n",
|
||||||
|
" ]\n",
|
||||||
"\n",
|
"\n",
|
||||||
" y_past = X_past.pop(target_column_name).values.astype(np.float)\n",
|
" y_past = X_past.pop(target_column_name).values.astype(float)\n",
|
||||||
" y_future = X_future.pop(target_column_name).values.astype(np.float)\n",
|
" y_future = X_future.pop(target_column_name).values.astype(float)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Now take y_future and turn it into question marks\n",
|
" # Now take y_future and turn it into question marks\n",
|
||||||
" y_query = y_future.copy().astype(np.float) # because sometimes life hands you an int\n",
|
" y_query = y_future.copy().astype(float) # because sometimes life hands you an int\n",
|
||||||
" y_query.fill(np.NaN)\n",
|
" y_query.fill(np.NaN)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
|
||||||
" print(\"X_past is \" + str(X_past.shape) + \" - shaped\")\n",
|
" print(\"X_past is \" + str(X_past.shape) + \" - shaped\")\n",
|
||||||
" print(\"X_future is \" + str(X_future.shape) + \" - shaped\")\n",
|
" print(\"X_future is \" + str(X_future.shape) + \" - shaped\")\n",
|
||||||
" print(\"y_past is \" + str(y_past.shape) + \" - shaped\")\n",
|
" print(\"y_past is \" + str(y_past.shape) + \" - shaped\")\n",
|
||||||
" print(\"y_query is \" + str(y_query.shape) + \" - shaped\")\n",
|
" print(\"y_query is \" + str(y_query.shape) + \" - shaped\")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
|
||||||
" X_pred = pd.concat([X_past, X_future])\n",
|
" X_pred = pd.concat([X_past, X_future])\n",
|
||||||
" y_pred = np.concatenate([y_past, y_query])\n",
|
" y_pred = np.concatenate([y_past, y_query])\n",
|
||||||
" return X_pred, y_pred"
|
" return X_pred, y_pred"
|
||||||
@@ -655,8 +678,16 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(X_context.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(['min','max','count']))\n",
|
"print(\n",
|
||||||
"print(X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(['min','max','count']))\n",
|
" X_context.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(\n",
|
||||||
|
" [\"min\", \"max\", \"count\"]\n",
|
||||||
|
" )\n",
|
||||||
|
")\n",
|
||||||
|
"print(\n",
|
||||||
|
" X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(\n",
|
||||||
|
" [\"min\", \"max\", \"count\"]\n",
|
||||||
|
" )\n",
|
||||||
|
")\n",
|
||||||
"X_context.tail(5)"
|
"X_context.tail(5)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -666,11 +697,11 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Since the length of the lookback is 3, \n",
|
"# Since the length of the lookback is 3,\n",
|
||||||
"# we need to add 3 periods from the context to the request\n",
|
"# we need to add 3 periods from the context to the request\n",
|
||||||
"# so that the model has the data it needs\n",
|
"# so that the model has the data it needs\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Put the X and y back together for a while. \n",
|
"# Put the X and y back together for a while.\n",
|
||||||
"# They like each other and it makes them happy.\n",
|
"# They like each other and it makes them happy.\n",
|
||||||
"X_context[TARGET_COLUMN_NAME] = y_context\n",
|
"X_context[TARGET_COLUMN_NAME] = y_context\n",
|
||||||
"X_away[TARGET_COLUMN_NAME] = y_away\n",
|
"X_away[TARGET_COLUMN_NAME] = y_away\n",
|
||||||
@@ -681,7 +712,7 @@
|
|||||||
"# it is indeed the last point of the context\n",
|
"# it is indeed the last point of the context\n",
|
||||||
"assert forecast_origin == X_context[TIME_COLUMN_NAME].max()\n",
|
"assert forecast_origin == X_context[TIME_COLUMN_NAME].max()\n",
|
||||||
"print(\"Forecast origin: \" + str(forecast_origin))\n",
|
"print(\"Forecast origin: \" + str(forecast_origin))\n",
|
||||||
" \n",
|
"\n",
|
||||||
"# the model uses lags and rolling windows to look back in time\n",
|
"# the model uses lags and rolling windows to look back in time\n",
|
||||||
"n_lookback_periods = max(lags)\n",
|
"n_lookback_periods = max(lags)\n",
|
||||||
"lookback = pd.DateOffset(hours=n_lookback_periods)\n",
|
"lookback = pd.DateOffset(hours=n_lookback_periods)\n",
|
||||||
@@ -689,8 +720,9 @@
|
|||||||
"horizon = pd.DateOffset(hours=forecast_horizon)\n",
|
"horizon = pd.DateOffset(hours=forecast_horizon)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# now make the forecast query from context (refer to figure)\n",
|
"# now make the forecast query from context (refer to figure)\n",
|
||||||
"X_pred, y_pred = make_forecasting_query(fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME,\n",
|
"X_pred, y_pred = make_forecasting_query(\n",
|
||||||
" forecast_origin, horizon, lookback)\n",
|
" fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME, forecast_origin, horizon, lookback\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# show the forecast request aligned\n",
|
"# show the forecast request aligned\n",
|
||||||
"X_show = X_pred.copy()\n",
|
"X_show = X_pred.copy()\n",
|
||||||
@@ -717,7 +749,7 @@
|
|||||||
"# show the forecast aligned\n",
|
"# show the forecast aligned\n",
|
||||||
"X_show = xy_away.reset_index()\n",
|
"X_show = xy_away.reset_index()\n",
|
||||||
"# without the generated features\n",
|
"# without the generated features\n",
|
||||||
"X_show[['date', 'time_series_id', 'ext_predictor', '_automl_target_col']]\n",
|
"X_show[[\"date\", \"time_series_id\", \"ext_predictor\", \"_automl_target_col\"]]\n",
|
||||||
"# prediction is in _automl_target_col"
|
"# prediction is in _automl_target_col"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -726,7 +758,15 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## Forecasting farther than the forecast horizon <a id=\"recursive forecasting\"></a>\n",
|
"## Forecasting farther than the forecast horizon <a id=\"recursive forecasting\"></a>\n",
|
||||||
"When the forecast destination, or the latest date in the prediction data frame, is farther into the future than the specified forecast horizon, the `forecast()` function will still make point predictions out to the later date using a recursive operation mode. Internally, the method recursively applies the regular forecaster to generate context so that we can forecast further into the future. \n",
|
"When the forecast destination, or the latest date in the prediction data frame, is farther into the future than the specified forecast horizon, the forecaster must be iteratively applied. Here, we advance the forecast origin on each iteration over the prediction window, predicting `max_horizon` periods ahead on each iteration. There are two choices for the context data to use as the forecaster advances into the prediction window:\n",
|
||||||
|
"\n",
|
||||||
|
"1. We can use forecasted values from previous iterations (recursive forecast),\n",
|
||||||
|
"2. We can use known, actual values of the target if they are available (rolling forecast).\n",
|
||||||
|
"\n",
|
||||||
|
"The first method is useful in a true forecasting scenario when we do not yet know the actual target values while the second is useful in an evaluation scenario where we want to compute accuracy metrics for the `max_horizon`-period-ahead forecaster over a long test set. We refer to the first as a **recursive forecast** since we apply the forecaster recursively over the prediction window and the second as a **rolling forecast** since we roll forward over known actuals.\n",
|
||||||
|
"\n",
|
||||||
|
"### Recursive forecasting\n",
|
||||||
|
"By default, the `forecast()` function will make point predictions out to the later date using a recursive operation mode. Internally, the method recursively applies the regular forecaster to generate context so that we can forecast further into the future. \n",
|
||||||
"\n",
|
"\n",
|
||||||
"To illustrate the use-case and operation of recursive forecasting, we'll consider an example with a single time-series where the forecasting period directly follows the training period and is twice as long as the forecasting horizon given at training time.\n",
|
"To illustrate the use-case and operation of recursive forecasting, we'll consider an example with a single time-series where the forecasting period directly follows the training period and is twice as long as the forecasting horizon given at training time.\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -748,12 +788,14 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"# generate the same kind of test data we trained on, but with a single time-series and test period twice as long\n",
|
"# generate the same kind of test data we trained on, but with a single time-series and test period twice as long\n",
|
||||||
"# as the forecast_horizon.\n",
|
"# as the forecast_horizon.\n",
|
||||||
"_, _, X_test_long, y_test_long = get_timeseries(train_len=n_train_periods,\n",
|
"_, _, X_test_long, y_test_long = get_timeseries(\n",
|
||||||
" test_len=forecast_horizon*2,\n",
|
" train_len=n_train_periods,\n",
|
||||||
" time_column_name=TIME_COLUMN_NAME,\n",
|
" test_len=forecast_horizon * 2,\n",
|
||||||
" target_column_name=TARGET_COLUMN_NAME,\n",
|
" time_column_name=TIME_COLUMN_NAME,\n",
|
||||||
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
|
" target_column_name=TARGET_COLUMN_NAME,\n",
|
||||||
" time_series_number=1)\n",
|
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
|
||||||
|
" time_series_number=1,\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].min())\n",
|
"print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].min())\n",
|
||||||
"print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())"
|
"print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())"
|
||||||
@@ -776,12 +818,43 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# What forecast() function does in this case is equivalent to iterating it twice over the test set as the following. \n",
|
"# What forecast() function does in this case is equivalent to iterating it twice over the test set as the following.\n",
|
||||||
"y_pred1, _ = fitted_model.forecast(X_test_long[:forecast_horizon])\n",
|
"y_pred1, _ = fitted_model.forecast(X_test_long[:forecast_horizon])\n",
|
||||||
"y_pred_all, _ = fitted_model.forecast(X_test_long, np.concatenate((y_pred1, np.full(forecast_horizon, np.nan))))\n",
|
"y_pred_all, _ = fitted_model.forecast(\n",
|
||||||
|
" X_test_long, np.concatenate((y_pred1, np.full(forecast_horizon, np.nan)))\n",
|
||||||
|
")\n",
|
||||||
"np.array_equal(y_pred_all, y_pred_long)"
|
"np.array_equal(y_pred_all, y_pred_long)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Rolling forecasts\n",
|
||||||
|
"A rolling forecast is a similar concept to the recursive forecasts described above except that we use known actual values of the target for our context data. We have provided a different, public method for this called `rolling_forecast`. In addition to test data and actuals (`X_test` and `y_test`), `rolling_forecast` also accepts an optional `step` parameter that controls how far the origin advances on each iteration. The recursive forecast mode uses a fixed step of `max_horizon` while `rolling_forecast` defaults to a step size of 1, but can be set to any integer from 1 to `max_horizon`, inclusive.\n",
|
||||||
|
"\n",
|
||||||
|
"Let's see what the rolling forecast looks like on the long test set with the step set to 1:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"X_rf = fitted_model.rolling_forecast(X_test_long, y_test_long, step=1)\n",
|
||||||
|
"X_rf.head(n=12)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Notice that `rolling_forecast` has returned a single DataFrame containing all results and has generated some new columns: `_automl_forecast_origin`, `_automl_forecast_y`, and `_automl_actual_y`. These are the origin date for each forecast, the forecasted value and the actual value, respectively. Note that \"y\" in the forecast and actual column names will generally be replaced by the target column name supplied to AutoML.\n",
|
||||||
|
"\n",
|
||||||
|
"The output above shows forecasts for two prediction windows, the first with origin at the end of the training set and the second including the first observation in the test set (2000-01-01 06:00:00). Since the forecast windows overlap, there are multiple forecasts for most dates which are associated with different origin dates."
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -830,9 +903,9 @@
|
|||||||
"friendly_name": "Forecasting away from training data",
|
"friendly_name": "Forecasting away from training data",
|
||||||
"index_order": 3,
|
"index_order": 3,
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3.6",
|
"display_name": "Python 3.8 - AzureML",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python36"
|
"name": "python38-azureml"
|
||||||
},
|
},
|
||||||
"language_info": {
|
"language_info": {
|
||||||
"codemirror_mode": {
|
"codemirror_mode": {
|
||||||
@@ -844,14 +917,19 @@
|
|||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.6.8"
|
"version": "3.7.13"
|
||||||
},
|
},
|
||||||
"tags": [
|
"tags": [
|
||||||
"Forecasting",
|
"Forecasting",
|
||||||
"Confidence Intervals"
|
"Confidence Intervals"
|
||||||
],
|
],
|
||||||
"task": "Forecasting"
|
"task": "Forecasting",
|
||||||
|
"vscode": {
|
||||||
|
"interpreter": {
|
||||||
|
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
|
||||||
|
}
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
"nbformat_minor": 2
|
"nbformat_minor": 4
|
||||||
}
|
}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: auto-ml-forecasting-function
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -19,7 +19,14 @@
|
|||||||
"hidePrompt": false
|
"hidePrompt": false
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
""
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<font color=\"red\" size=\"5\"><strong>!Important!</strong> </br>This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-github-dau)).</font>"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -30,7 +37,7 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"# Automated Machine Learning\n",
|
"# Automated Machine Learning\n",
|
||||||
"**Beer Production Forecasting**\n",
|
"**Github DAU Forecasting**\n",
|
||||||
"\n",
|
"\n",
|
||||||
"## Contents\n",
|
"## Contents\n",
|
||||||
"1. [Introduction](#Introduction)\n",
|
"1. [Introduction](#Introduction)\n",
|
||||||
@@ -48,16 +55,16 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"## Introduction\n",
|
"## Introduction\n",
|
||||||
"This notebook demonstrates demand forecasting for Beer Production Dataset using AutoML.\n",
|
"This notebook demonstrates demand forecasting for Github Daily Active Users Dataset using AutoML.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"AutoML highlights here include using Deep Learning forecasts, Arima, Prophet, Remote Execution and Remote Inferencing, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
|
"AutoML highlights here include using Deep Learning forecasts, Arima, Prophet, Remote Execution and Remote Inferencing, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
|
"Make sure you have executed the [configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) before running this notebook.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Notebook synopsis:\n",
|
"Notebook synopsis:\n",
|
||||||
"\n",
|
"\n",
|
||||||
"1. Creating an Experiment in an existing Workspace\n",
|
"1. Creating an Experiment in an existing Workspace\n",
|
||||||
"2. Configuration and remote run of AutoML for a time-series model exploring Regression learners, Arima, Prophet and DNNs\n",
|
"2. Configuration and remote run of AutoML for a time-series model exploring DNNs\n",
|
||||||
"4. Evaluating the fitted model using a rolling test "
|
"4. Evaluating the fitted model using a rolling test "
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -92,8 +99,7 @@
|
|||||||
"# Squash warning messages for cleaner output in the notebook\n",
|
"# Squash warning messages for cleaner output in the notebook\n",
|
||||||
"warnings.showwarning = lambda *args, **kwargs: None\n",
|
"warnings.showwarning = lambda *args, **kwargs: None\n",
|
||||||
"\n",
|
"\n",
|
||||||
"from azureml.core.workspace import Workspace\n",
|
"from azureml.core import Workspace, Experiment, Dataset\n",
|
||||||
"from azureml.core.experiment import Experiment\n",
|
|
||||||
"from azureml.train.automl import AutoMLConfig\n",
|
"from azureml.train.automl import AutoMLConfig\n",
|
||||||
"from matplotlib import pyplot as plt\n",
|
"from matplotlib import pyplot as plt\n",
|
||||||
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
|
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
|
||||||
@@ -104,7 +110,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -113,7 +119,6 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
|
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -139,18 +144,19 @@
|
|||||||
"ws = Workspace.from_config()\n",
|
"ws = Workspace.from_config()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# choose a name for the run history container in the workspace\n",
|
"# choose a name for the run history container in the workspace\n",
|
||||||
"experiment_name = 'beer-remote-cpu'\n",
|
"experiment_name = \"github-remote-cpu\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"experiment = Experiment(ws, experiment_name)\n",
|
"experiment = Experiment(ws, experiment_name)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"output = {}\n",
|
"output = {}\n",
|
||||||
"output['Subscription ID'] = ws.subscription_id\n",
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
"output['Workspace'] = ws.name\n",
|
"output[\"Workspace\"] = ws.name\n",
|
||||||
"output['Resource Group'] = ws.resource_group\n",
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
"output['Location'] = ws.location\n",
|
"output[\"Location\"] = ws.location\n",
|
||||||
"output['Run History Name'] = experiment_name\n",
|
"output[\"Run History Name\"] = experiment_name\n",
|
||||||
"pd.set_option('display.max_colwidth', -1)\n",
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
"outputDf.T"
|
"outputDf.T"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -162,7 +168,9 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"### Using AmlCompute\n",
|
"### Using AmlCompute\n",
|
||||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource."
|
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -178,15 +186,16 @@
|
|||||||
"from azureml.core.compute_target import ComputeTargetException\n",
|
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Choose a name for your CPU cluster\n",
|
"# Choose a name for your CPU cluster\n",
|
||||||
"cpu_cluster_name = \"beer-cluster\"\n",
|
"cpu_cluster_name = \"github-cluster\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Verify that cluster does not exist already\n",
|
"# Verify that cluster does not exist already\n",
|
||||||
"try:\n",
|
"try:\n",
|
||||||
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||||
" print('Found existing cluster, use it.')\n",
|
" print(\"Found existing cluster, use it.\")\n",
|
||||||
"except ComputeTargetException:\n",
|
"except ComputeTargetException:\n",
|
||||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
|
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||||
" max_nodes=4)\n",
|
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
|
||||||
|
" )\n",
|
||||||
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"compute_target.wait_for_completion(show_output=True)"
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
@@ -200,7 +209,7 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"## Data\n",
|
"## Data\n",
|
||||||
"Read Beer demand data from file, and preview data."
|
"Read Github DAU data from file, and preview data."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -243,17 +252,19 @@
|
|||||||
"plt.tight_layout()\n",
|
"plt.tight_layout()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"plt.subplot(2, 1, 1)\n",
|
"plt.subplot(2, 1, 1)\n",
|
||||||
"plt.title('Beer Production By Year')\n",
|
"plt.title(\"Github Daily Active User By Year\")\n",
|
||||||
"df = pd.read_csv(\"Beer_no_valid_split_train.csv\", parse_dates=True, index_col= 'DATE').drop(columns='grain')\n",
|
"df = pd.read_csv(\"github_dau_2011-2018_train.csv\", parse_dates=True, index_col=\"date\")\n",
|
||||||
"test_df = pd.read_csv(\"Beer_no_valid_split_test.csv\", parse_dates=True, index_col= 'DATE').drop(columns='grain')\n",
|
"test_df = pd.read_csv(\n",
|
||||||
|
" \"github_dau_2011-2018_test.csv\", parse_dates=True, index_col=\"date\"\n",
|
||||||
|
")\n",
|
||||||
"plt.plot(df)\n",
|
"plt.plot(df)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"plt.subplot(2, 1, 2)\n",
|
"plt.subplot(2, 1, 2)\n",
|
||||||
"plt.title('Beer Production By Month')\n",
|
"plt.title(\"Github Daily Active User By Month\")\n",
|
||||||
"groups = df.groupby(df.index.month)\n",
|
"groups = df.groupby(df.index.month)\n",
|
||||||
"months = concat([DataFrame(x[1].values) for x in groups], axis=1)\n",
|
"months = concat([DataFrame(x[1].values) for x in groups], axis=1)\n",
|
||||||
"months = DataFrame(months)\n",
|
"months = DataFrame(months)\n",
|
||||||
"months.columns = range(1,13)\n",
|
"months.columns = range(1, 49)\n",
|
||||||
"months.boxplot()\n",
|
"months.boxplot()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"plt.show()"
|
"plt.show()"
|
||||||
@@ -268,10 +279,10 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"target_column_name = 'BeerProduction'\n",
|
"target_column_name = \"count\"\n",
|
||||||
"time_column_name = 'DATE'\n",
|
"time_column_name = \"date\"\n",
|
||||||
"time_series_id_column_names = []\n",
|
"time_series_id_column_names = []\n",
|
||||||
"freq = 'M' #Monthly data"
|
"freq = \"D\" # Daily data"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -294,19 +305,22 @@
|
|||||||
"from helper import split_full_for_forecasting\n",
|
"from helper import split_full_for_forecasting\n",
|
||||||
"\n",
|
"\n",
|
||||||
"train, valid = split_full_for_forecasting(df, time_column_name)\n",
|
"train, valid = split_full_for_forecasting(df, time_column_name)\n",
|
||||||
"train.to_csv(\"train.csv\")\n",
|
"\n",
|
||||||
"valid.to_csv(\"valid.csv\")\n",
|
"# Reset index to create a Tabualr Dataset.\n",
|
||||||
"test_df.to_csv(\"test.csv\")\n",
|
"train.reset_index(inplace=True)\n",
|
||||||
|
"valid.reset_index(inplace=True)\n",
|
||||||
|
"test_df.reset_index(inplace=True)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"datastore = ws.get_default_datastore()\n",
|
"datastore = ws.get_default_datastore()\n",
|
||||||
"datastore.upload_files(files = ['./train.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
|
"train_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
|
||||||
"datastore.upload_files(files = ['./valid.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
|
" train, target=(datastore, \"dataset/\"), name=\"Github_DAU_train\"\n",
|
||||||
"datastore.upload_files(files = ['./test.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
|
")\n",
|
||||||
"\n",
|
"valid_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
|
||||||
"from azureml.core import Dataset\n",
|
" valid, target=(datastore, \"dataset/\"), name=\"Github_DAU_valid\"\n",
|
||||||
"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/train.csv')])\n",
|
")\n",
|
||||||
"valid_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/valid.csv')])\n",
|
"test_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
|
||||||
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])"
|
" test_df, target=(datastore, \"dataset/\"), name=\"Github_DAU_test\"\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -318,7 +332,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"### Setting forecaster maximum horizon \n",
|
"### Setting forecaster maximum horizon \n",
|
||||||
"\n",
|
"\n",
|
||||||
"The forecast horizon is the number of periods into the future that the model should predict. Here, we set the horizon to 12 periods (i.e. 12 months). Notice that this is much shorter than the number of months in the test set; we will need to use a rolling test to evaluate the performance on the whole test set. For more discussion of forecast horizons and guiding principles for setting them, please see the [energy demand notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand). "
|
"The forecast horizon is the number of periods into the future that the model should predict. Here, we set the horizon to 14 periods (i.e. 14 days). Notice that this is much shorter than the number of months in the test set; we will need to use a rolling test to evaluate the performance on the whole test set. For more discussion of forecast horizons and guiding principles for setting them, please see the [energy demand notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand). "
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -330,7 +344,7 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"forecast_horizon = 12"
|
"forecast_horizon = 14"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -364,22 +378,29 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||||
|
"\n",
|
||||||
"forecasting_parameters = ForecastingParameters(\n",
|
"forecasting_parameters = ForecastingParameters(\n",
|
||||||
" time_column_name=time_column_name, forecast_horizon=forecast_horizon\n",
|
" time_column_name=time_column_name,\n",
|
||||||
|
" forecast_horizon=forecast_horizon,\n",
|
||||||
|
" freq=\"D\", # Set the forecast frequency to be daily\n",
|
||||||
")\n",
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"automl_config = AutoMLConfig(task='forecasting', \n",
|
"# To only allow the TCNForecaster we set the allowed_models parameter to reflect this.\n",
|
||||||
" primary_metric='normalized_root_mean_squared_error',\n",
|
"automl_config = AutoMLConfig(\n",
|
||||||
" experiment_timeout_hours = 1,\n",
|
" task=\"forecasting\",\n",
|
||||||
" training_data=train_dataset,\n",
|
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||||
" label_column_name=target_column_name,\n",
|
" experiment_timeout_hours=1.5,\n",
|
||||||
" validation_data=valid_dataset, \n",
|
" training_data=train_dataset,\n",
|
||||||
" verbosity=logging.INFO,\n",
|
" label_column_name=target_column_name,\n",
|
||||||
" compute_target=compute_target,\n",
|
" validation_data=valid_dataset,\n",
|
||||||
" max_concurrent_iterations=4,\n",
|
" verbosity=logging.INFO,\n",
|
||||||
" max_cores_per_iteration=-1,\n",
|
" compute_target=compute_target,\n",
|
||||||
" enable_dnn=True,\n",
|
" max_concurrent_iterations=4,\n",
|
||||||
" forecasting_parameters=forecasting_parameters)"
|
" max_cores_per_iteration=-1,\n",
|
||||||
|
" enable_dnn=True,\n",
|
||||||
|
" allowed_models=[\"TCNForecaster\"],\n",
|
||||||
|
" forecasting_parameters=forecasting_parameters,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -401,8 +422,7 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"remote_run = experiment.submit(automl_config, show_output= False)\n",
|
"remote_run = experiment.submit(automl_config, show_output=True)"
|
||||||
"remote_run"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -419,15 +439,6 @@
|
|||||||
"# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
|
"# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"remote_run.wait_for_completion()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
@@ -459,6 +470,7 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from helper import get_result_df\n",
|
"from helper import get_result_df\n",
|
||||||
|
"\n",
|
||||||
"summary_df = get_result_df(remote_run)\n",
|
"summary_df = get_result_df(remote_run)\n",
|
||||||
"summary_df"
|
"summary_df"
|
||||||
]
|
]
|
||||||
@@ -474,11 +486,14 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"from azureml.core.run import Run\n",
|
"from azureml.core.run import Run\n",
|
||||||
"from azureml.widgets import RunDetails\n",
|
"from azureml.widgets import RunDetails\n",
|
||||||
"forecast_model = 'TCNForecaster'\n",
|
"\n",
|
||||||
"if not forecast_model in summary_df['run_id']:\n",
|
"forecast_model = \"TCNForecaster\"\n",
|
||||||
" forecast_model = 'ForecastTCN'\n",
|
"if not forecast_model in summary_df[\"run_id\"]:\n",
|
||||||
" \n",
|
" forecast_model = \"ForecastTCN\"\n",
|
||||||
"best_dnn_run_id = summary_df['run_id'][forecast_model]\n",
|
"\n",
|
||||||
|
"best_dnn_run_id = summary_df[summary_df[\"Score\"] == summary_df[\"Score\"].min()][\n",
|
||||||
|
" \"run_id\"\n",
|
||||||
|
"][forecast_model]\n",
|
||||||
"best_dnn_run = Run(experiment, best_dnn_run_id)"
|
"best_dnn_run = Run(experiment, best_dnn_run_id)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -492,7 +507,7 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"best_dnn_run.parent\n",
|
"best_dnn_run.parent\n",
|
||||||
"RunDetails(best_dnn_run.parent).show() "
|
"RunDetails(best_dnn_run.parent).show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -505,7 +520,7 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"best_dnn_run\n",
|
"best_dnn_run\n",
|
||||||
"RunDetails(best_dnn_run).show() "
|
"RunDetails(best_dnn_run).show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -539,8 +554,6 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.core import Dataset\n",
|
|
||||||
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])\n",
|
|
||||||
"# preview the first 3 rows of the dataset\n",
|
"# preview the first 3 rows of the dataset\n",
|
||||||
"test_dataset.take(5).to_pandas_dataframe()"
|
"test_dataset.take(5).to_pandas_dataframe()"
|
||||||
]
|
]
|
||||||
@@ -551,7 +564,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"compute_target = ws.compute_targets['beer-cluster']\n",
|
"compute_target = ws.compute_targets[\"github-cluster\"]\n",
|
||||||
"test_experiment = Experiment(ws, experiment_name + \"_test\")"
|
"test_experiment = Experiment(ws, experiment_name + \"_test\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -567,9 +580,9 @@
|
|||||||
"import os\n",
|
"import os\n",
|
||||||
"import shutil\n",
|
"import shutil\n",
|
||||||
"\n",
|
"\n",
|
||||||
"script_folder = os.path.join(os.getcwd(), 'inference')\n",
|
"script_folder = os.path.join(os.getcwd(), \"inference\")\n",
|
||||||
"os.makedirs(script_folder, exist_ok=True)\n",
|
"os.makedirs(script_folder, exist_ok=True)\n",
|
||||||
"shutil.copy('infer.py', script_folder)"
|
"shutil.copy(\"infer.py\", script_folder)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -580,8 +593,18 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"from helper import run_inference\n",
|
"from helper import run_inference\n",
|
||||||
"\n",
|
"\n",
|
||||||
"test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run, test_dataset, valid_dataset, forecast_horizon,\n",
|
"test_run = run_inference(\n",
|
||||||
" target_column_name, time_column_name, freq)"
|
" test_experiment,\n",
|
||||||
|
" compute_target,\n",
|
||||||
|
" script_folder,\n",
|
||||||
|
" best_dnn_run,\n",
|
||||||
|
" test_dataset,\n",
|
||||||
|
" valid_dataset,\n",
|
||||||
|
" forecast_horizon,\n",
|
||||||
|
" target_column_name,\n",
|
||||||
|
" time_column_name,\n",
|
||||||
|
" freq,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -601,8 +624,19 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"from helper import run_multiple_inferences\n",
|
"from helper import run_multiple_inferences\n",
|
||||||
"\n",
|
"\n",
|
||||||
"summary_df = run_multiple_inferences(summary_df, experiment, test_experiment, compute_target, script_folder, test_dataset, \n",
|
"summary_df = run_multiple_inferences(\n",
|
||||||
" valid_dataset, forecast_horizon, target_column_name, time_column_name, freq)"
|
" summary_df,\n",
|
||||||
|
" experiment,\n",
|
||||||
|
" test_experiment,\n",
|
||||||
|
" compute_target,\n",
|
||||||
|
" script_folder,\n",
|
||||||
|
" test_dataset,\n",
|
||||||
|
" valid_dataset,\n",
|
||||||
|
" forecast_horizon,\n",
|
||||||
|
" target_column_name,\n",
|
||||||
|
" time_column_name,\n",
|
||||||
|
" freq,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -622,7 +656,7 @@
|
|||||||
" test_run = Run(test_experiment, test_run_id)\n",
|
" test_run = Run(test_experiment, test_run_id)\n",
|
||||||
" test_run.wait_for_completion()\n",
|
" test_run.wait_for_completion()\n",
|
||||||
" test_score = test_run.get_metrics()[run_summary.primary_metric]\n",
|
" test_score = test_run.get_metrics()[run_summary.primary_metric]\n",
|
||||||
" summary_df.loc[summary_df.run_id == run_id, 'Test Score'] = test_score\n",
|
" summary_df.loc[summary_df.run_id == run_id, \"Test Score\"] = test_score\n",
|
||||||
" print(\"Test Score: \", test_score)"
|
" print(\"Test Score: \", test_score)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -654,9 +688,9 @@
|
|||||||
],
|
],
|
||||||
"hide_code_all_hidden": false,
|
"hide_code_all_hidden": false,
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3.6",
|
"display_name": "Python 3.8 - AzureML",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python36"
|
"name": "python38-azureml"
|
||||||
},
|
},
|
||||||
"language_info": {
|
"language_info": {
|
||||||
"codemirror_mode": {
|
"codemirror_mode": {
|
||||||
@@ -668,9 +702,9 @@
|
|||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.6.7"
|
"version": "3.8.10"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
"nbformat_minor": 2
|
"nbformat_minor": 4
|
||||||
}
|
}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: auto-ml-forecasting-github-dau
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -0,0 +1,455 @@
|
|||||||
|
date,count,day_of_week,month_of_year,holiday
|
||||||
|
2017-06-04,104663,6.0,5.0,0.0
|
||||||
|
2017-06-05,155824,0.0,5.0,0.0
|
||||||
|
2017-06-06,164908,1.0,5.0,0.0
|
||||||
|
2017-06-07,170309,2.0,5.0,0.0
|
||||||
|
2017-06-08,164256,3.0,5.0,0.0
|
||||||
|
2017-06-09,153406,4.0,5.0,0.0
|
||||||
|
2017-06-10,97024,5.0,5.0,0.0
|
||||||
|
2017-06-11,103442,6.0,5.0,0.0
|
||||||
|
2017-06-12,160768,0.0,5.0,0.0
|
||||||
|
2017-06-13,166288,1.0,5.0,0.0
|
||||||
|
2017-06-14,163819,2.0,5.0,0.0
|
||||||
|
2017-06-15,157593,3.0,5.0,0.0
|
||||||
|
2017-06-16,149259,4.0,5.0,0.0
|
||||||
|
2017-06-17,95579,5.0,5.0,0.0
|
||||||
|
2017-06-18,98723,6.0,5.0,0.0
|
||||||
|
2017-06-19,159076,0.0,5.0,0.0
|
||||||
|
2017-06-20,163340,1.0,5.0,0.0
|
||||||
|
2017-06-21,163344,2.0,5.0,0.0
|
||||||
|
2017-06-22,159528,3.0,5.0,0.0
|
||||||
|
2017-06-23,146563,4.0,5.0,0.0
|
||||||
|
2017-06-24,92631,5.0,5.0,0.0
|
||||||
|
2017-06-25,96549,6.0,5.0,0.0
|
||||||
|
2017-06-26,153249,0.0,5.0,0.0
|
||||||
|
2017-06-27,160357,1.0,5.0,0.0
|
||||||
|
2017-06-28,159941,2.0,5.0,0.0
|
||||||
|
2017-06-29,156781,3.0,5.0,0.0
|
||||||
|
2017-06-30,144709,4.0,5.0,0.0
|
||||||
|
2017-07-01,89101,5.0,6.0,0.0
|
||||||
|
2017-07-02,93046,6.0,6.0,0.0
|
||||||
|
2017-07-03,144113,0.0,6.0,0.0
|
||||||
|
2017-07-04,143061,1.0,6.0,1.0
|
||||||
|
2017-07-05,154603,2.0,6.0,0.0
|
||||||
|
2017-07-06,157200,3.0,6.0,0.0
|
||||||
|
2017-07-07,147213,4.0,6.0,0.0
|
||||||
|
2017-07-08,92348,5.0,6.0,0.0
|
||||||
|
2017-07-09,97018,6.0,6.0,0.0
|
||||||
|
2017-07-10,157192,0.0,6.0,0.0
|
||||||
|
2017-07-11,161819,1.0,6.0,0.0
|
||||||
|
2017-07-12,161998,2.0,6.0,0.0
|
||||||
|
2017-07-13,160280,3.0,6.0,0.0
|
||||||
|
2017-07-14,146818,4.0,6.0,0.0
|
||||||
|
2017-07-15,93041,5.0,6.0,0.0
|
||||||
|
2017-07-16,97505,6.0,6.0,0.0
|
||||||
|
2017-07-17,156167,0.0,6.0,0.0
|
||||||
|
2017-07-18,162855,1.0,6.0,0.0
|
||||||
|
2017-07-19,162519,2.0,6.0,0.0
|
||||||
|
2017-07-20,159941,3.0,6.0,0.0
|
||||||
|
2017-07-21,148460,4.0,6.0,0.0
|
||||||
|
2017-07-22,93431,5.0,6.0,0.0
|
||||||
|
2017-07-23,98553,6.0,6.0,0.0
|
||||||
|
2017-07-24,156202,0.0,6.0,0.0
|
||||||
|
2017-07-25,162503,1.0,6.0,0.0
|
||||||
|
2017-07-26,158479,2.0,6.0,0.0
|
||||||
|
2017-07-27,158192,3.0,6.0,0.0
|
||||||
|
2017-07-28,147108,4.0,6.0,0.0
|
||||||
|
2017-07-29,93799,5.0,6.0,0.0
|
||||||
|
2017-07-30,97920,6.0,6.0,0.0
|
||||||
|
2017-07-31,152197,0.0,6.0,0.0
|
||||||
|
2017-08-01,158477,1.0,7.0,0.0
|
||||||
|
2017-08-02,159089,2.0,7.0,0.0
|
||||||
|
2017-08-03,157182,3.0,7.0,0.0
|
||||||
|
2017-08-04,146345,4.0,7.0,0.0
|
||||||
|
2017-08-05,92534,5.0,7.0,0.0
|
||||||
|
2017-08-06,97128,6.0,7.0,0.0
|
||||||
|
2017-08-07,151359,0.0,7.0,0.0
|
||||||
|
2017-08-08,159895,1.0,7.0,0.0
|
||||||
|
2017-08-09,158329,2.0,7.0,0.0
|
||||||
|
2017-08-10,155468,3.0,7.0,0.0
|
||||||
|
2017-08-11,144914,4.0,7.0,0.0
|
||||||
|
2017-08-12,92258,5.0,7.0,0.0
|
||||||
|
2017-08-13,95933,6.0,7.0,0.0
|
||||||
|
2017-08-14,147706,0.0,7.0,0.0
|
||||||
|
2017-08-15,151115,1.0,7.0,0.0
|
||||||
|
2017-08-16,157640,2.0,7.0,0.0
|
||||||
|
2017-08-17,156600,3.0,7.0,0.0
|
||||||
|
2017-08-18,146980,4.0,7.0,0.0
|
||||||
|
2017-08-19,94592,5.0,7.0,0.0
|
||||||
|
2017-08-20,99320,6.0,7.0,0.0
|
||||||
|
2017-08-21,145727,0.0,7.0,0.0
|
||||||
|
2017-08-22,160260,1.0,7.0,0.0
|
||||||
|
2017-08-23,160440,2.0,7.0,0.0
|
||||||
|
2017-08-24,157830,3.0,7.0,0.0
|
||||||
|
2017-08-25,145822,4.0,7.0,0.0
|
||||||
|
2017-08-26,94706,5.0,7.0,0.0
|
||||||
|
2017-08-27,99047,6.0,7.0,0.0
|
||||||
|
2017-08-28,152112,0.0,7.0,0.0
|
||||||
|
2017-08-29,162440,1.0,7.0,0.0
|
||||||
|
2017-08-30,162902,2.0,7.0,0.0
|
||||||
|
2017-08-31,159498,3.0,7.0,0.0
|
||||||
|
2017-09-01,145689,4.0,8.0,0.0
|
||||||
|
2017-09-02,93589,5.0,8.0,0.0
|
||||||
|
2017-09-03,100058,6.0,8.0,0.0
|
||||||
|
2017-09-04,140865,0.0,8.0,1.0
|
||||||
|
2017-09-05,165715,1.0,8.0,0.0
|
||||||
|
2017-09-06,167463,2.0,8.0,0.0
|
||||||
|
2017-09-07,164811,3.0,8.0,0.0
|
||||||
|
2017-09-08,156157,4.0,8.0,0.0
|
||||||
|
2017-09-09,101358,5.0,8.0,0.0
|
||||||
|
2017-09-10,107915,6.0,8.0,0.0
|
||||||
|
2017-09-11,167845,0.0,8.0,0.0
|
||||||
|
2017-09-12,172756,1.0,8.0,0.0
|
||||||
|
2017-09-13,172851,2.0,8.0,0.0
|
||||||
|
2017-09-14,171675,3.0,8.0,0.0
|
||||||
|
2017-09-15,159266,4.0,8.0,0.0
|
||||||
|
2017-09-16,103547,5.0,8.0,0.0
|
||||||
|
2017-09-17,110964,6.0,8.0,0.0
|
||||||
|
2017-09-18,170976,0.0,8.0,0.0
|
||||||
|
2017-09-19,177864,1.0,8.0,0.0
|
||||||
|
2017-09-20,173567,2.0,8.0,0.0
|
||||||
|
2017-09-21,172017,3.0,8.0,0.0
|
||||||
|
2017-09-22,161357,4.0,8.0,0.0
|
||||||
|
2017-09-23,104681,5.0,8.0,0.0
|
||||||
|
2017-09-24,111711,6.0,8.0,0.0
|
||||||
|
2017-09-25,173517,0.0,8.0,0.0
|
||||||
|
2017-09-26,180049,1.0,8.0,0.0
|
||||||
|
2017-09-27,178307,2.0,8.0,0.0
|
||||||
|
2017-09-28,174157,3.0,8.0,0.0
|
||||||
|
2017-09-29,161707,4.0,8.0,0.0
|
||||||
|
2017-09-30,110536,5.0,8.0,0.0
|
||||||
|
2017-10-01,106505,6.0,9.0,0.0
|
||||||
|
2017-10-02,157565,0.0,9.0,0.0
|
||||||
|
2017-10-03,164764,1.0,9.0,0.0
|
||||||
|
2017-10-04,163383,2.0,9.0,0.0
|
||||||
|
2017-10-05,162847,3.0,9.0,0.0
|
||||||
|
2017-10-06,153575,4.0,9.0,0.0
|
||||||
|
2017-10-07,107472,5.0,9.0,0.0
|
||||||
|
2017-10-08,116127,6.0,9.0,0.0
|
||||||
|
2017-10-09,174457,0.0,9.0,1.0
|
||||||
|
2017-10-10,185217,1.0,9.0,0.0
|
||||||
|
2017-10-11,185120,2.0,9.0,0.0
|
||||||
|
2017-10-12,180844,3.0,9.0,0.0
|
||||||
|
2017-10-13,170178,4.0,9.0,0.0
|
||||||
|
2017-10-14,112754,5.0,9.0,0.0
|
||||||
|
2017-10-15,121251,6.0,9.0,0.0
|
||||||
|
2017-10-16,183906,0.0,9.0,0.0
|
||||||
|
2017-10-17,188945,1.0,9.0,0.0
|
||||||
|
2017-10-18,187297,2.0,9.0,0.0
|
||||||
|
2017-10-19,183867,3.0,9.0,0.0
|
||||||
|
2017-10-20,173021,4.0,9.0,0.0
|
||||||
|
2017-10-21,115851,5.0,9.0,0.0
|
||||||
|
2017-10-22,126088,6.0,9.0,0.0
|
||||||
|
2017-10-23,189452,0.0,9.0,0.0
|
||||||
|
2017-10-24,194412,1.0,9.0,0.0
|
||||||
|
2017-10-25,192293,2.0,9.0,0.0
|
||||||
|
2017-10-26,190163,3.0,9.0,0.0
|
||||||
|
2017-10-27,177053,4.0,9.0,0.0
|
||||||
|
2017-10-28,114934,5.0,9.0,0.0
|
||||||
|
2017-10-29,125289,6.0,9.0,0.0
|
||||||
|
2017-10-30,189245,0.0,9.0,0.0
|
||||||
|
2017-10-31,191480,1.0,9.0,0.0
|
||||||
|
2017-11-01,182281,2.0,10.0,0.0
|
||||||
|
2017-11-02,186351,3.0,10.0,0.0
|
||||||
|
2017-11-03,175422,4.0,10.0,0.0
|
||||||
|
2017-11-04,118160,5.0,10.0,0.0
|
||||||
|
2017-11-05,127602,6.0,10.0,0.0
|
||||||
|
2017-11-06,191067,0.0,10.0,0.0
|
||||||
|
2017-11-07,197083,1.0,10.0,0.0
|
||||||
|
2017-11-08,194333,2.0,10.0,0.0
|
||||||
|
2017-11-09,193914,3.0,10.0,0.0
|
||||||
|
2017-11-10,179933,4.0,10.0,1.0
|
||||||
|
2017-11-11,121346,5.0,10.0,0.0
|
||||||
|
2017-11-12,131900,6.0,10.0,0.0
|
||||||
|
2017-11-13,196969,0.0,10.0,0.0
|
||||||
|
2017-11-14,201949,1.0,10.0,0.0
|
||||||
|
2017-11-15,198424,2.0,10.0,0.0
|
||||||
|
2017-11-16,196902,3.0,10.0,0.0
|
||||||
|
2017-11-17,183893,4.0,10.0,0.0
|
||||||
|
2017-11-18,122767,5.0,10.0,0.0
|
||||||
|
2017-11-19,130890,6.0,10.0,0.0
|
||||||
|
2017-11-20,194515,0.0,10.0,0.0
|
||||||
|
2017-11-21,198601,1.0,10.0,0.0
|
||||||
|
2017-11-22,191041,2.0,10.0,0.0
|
||||||
|
2017-11-23,170321,3.0,10.0,1.0
|
||||||
|
2017-11-24,155623,4.0,10.0,0.0
|
||||||
|
2017-11-25,115759,5.0,10.0,0.0
|
||||||
|
2017-11-26,128771,6.0,10.0,0.0
|
||||||
|
2017-11-27,199419,0.0,10.0,0.0
|
||||||
|
2017-11-28,207253,1.0,10.0,0.0
|
||||||
|
2017-11-29,205406,2.0,10.0,0.0
|
||||||
|
2017-11-30,200674,3.0,10.0,0.0
|
||||||
|
2017-12-01,187017,4.0,11.0,0.0
|
||||||
|
2017-12-02,129735,5.0,11.0,0.0
|
||||||
|
2017-12-03,139120,6.0,11.0,0.0
|
||||||
|
2017-12-04,205505,0.0,11.0,0.0
|
||||||
|
2017-12-05,208218,1.0,11.0,0.0
|
||||||
|
2017-12-06,202480,2.0,11.0,0.0
|
||||||
|
2017-12-07,197822,3.0,11.0,0.0
|
||||||
|
2017-12-08,180686,4.0,11.0,0.0
|
||||||
|
2017-12-09,123667,5.0,11.0,0.0
|
||||||
|
2017-12-10,130987,6.0,11.0,0.0
|
||||||
|
2017-12-11,193901,0.0,11.0,0.0
|
||||||
|
2017-12-12,194997,1.0,11.0,0.0
|
||||||
|
2017-12-13,192063,2.0,11.0,0.0
|
||||||
|
2017-12-14,186496,3.0,11.0,0.0
|
||||||
|
2017-12-15,170812,4.0,11.0,0.0
|
||||||
|
2017-12-16,110474,5.0,11.0,0.0
|
||||||
|
2017-12-17,118165,6.0,11.0,0.0
|
||||||
|
2017-12-18,176843,0.0,11.0,0.0
|
||||||
|
2017-12-19,179550,1.0,11.0,0.0
|
||||||
|
2017-12-20,173506,2.0,11.0,0.0
|
||||||
|
2017-12-21,165910,3.0,11.0,0.0
|
||||||
|
2017-12-22,145886,4.0,11.0,0.0
|
||||||
|
2017-12-23,95246,5.0,11.0,0.0
|
||||||
|
2017-12-24,88781,6.0,11.0,0.0
|
||||||
|
2017-12-25,98189,0.0,11.0,1.0
|
||||||
|
2017-12-26,121383,1.0,11.0,0.0
|
||||||
|
2017-12-27,135300,2.0,11.0,0.0
|
||||||
|
2017-12-28,136827,3.0,11.0,0.0
|
||||||
|
2017-12-29,127700,4.0,11.0,0.0
|
||||||
|
2017-12-30,93014,5.0,11.0,0.0
|
||||||
|
2017-12-31,82878,6.0,11.0,0.0
|
||||||
|
2018-01-01,86419,0.0,0.0,1.0
|
||||||
|
2018-01-02,147428,1.0,0.0,0.0
|
||||||
|
2018-01-03,162193,2.0,0.0,0.0
|
||||||
|
2018-01-04,163784,3.0,0.0,0.0
|
||||||
|
2018-01-05,158606,4.0,0.0,0.0
|
||||||
|
2018-01-06,113467,5.0,0.0,0.0
|
||||||
|
2018-01-07,118313,6.0,0.0,0.0
|
||||||
|
2018-01-08,175623,0.0,0.0,0.0
|
||||||
|
2018-01-09,183880,1.0,0.0,0.0
|
||||||
|
2018-01-10,183945,2.0,0.0,0.0
|
||||||
|
2018-01-11,181769,3.0,0.0,0.0
|
||||||
|
2018-01-12,170552,4.0,0.0,0.0
|
||||||
|
2018-01-13,115707,5.0,0.0,0.0
|
||||||
|
2018-01-14,121191,6.0,0.0,0.0
|
||||||
|
2018-01-15,176127,0.0,0.0,1.0
|
||||||
|
2018-01-16,188032,1.0,0.0,0.0
|
||||||
|
2018-01-17,189871,2.0,0.0,0.0
|
||||||
|
2018-01-18,189348,3.0,0.0,0.0
|
||||||
|
2018-01-19,177456,4.0,0.0,0.0
|
||||||
|
2018-01-20,123321,5.0,0.0,0.0
|
||||||
|
2018-01-21,128306,6.0,0.0,0.0
|
||||||
|
2018-01-22,186132,0.0,0.0,0.0
|
||||||
|
2018-01-23,197618,1.0,0.0,0.0
|
||||||
|
2018-01-24,196402,2.0,0.0,0.0
|
||||||
|
2018-01-25,192722,3.0,0.0,0.0
|
||||||
|
2018-01-26,179415,4.0,0.0,0.0
|
||||||
|
2018-01-27,125769,5.0,0.0,0.0
|
||||||
|
2018-01-28,133306,6.0,0.0,0.0
|
||||||
|
2018-01-29,194151,0.0,0.0,0.0
|
||||||
|
2018-01-30,198680,1.0,0.0,0.0
|
||||||
|
2018-01-31,198652,2.0,0.0,0.0
|
||||||
|
2018-02-01,195472,3.0,1.0,0.0
|
||||||
|
2018-02-02,183173,4.0,1.0,0.0
|
||||||
|
2018-02-03,124276,5.0,1.0,0.0
|
||||||
|
2018-02-04,129054,6.0,1.0,0.0
|
||||||
|
2018-02-05,190024,0.0,1.0,0.0
|
||||||
|
2018-02-06,198658,1.0,1.0,0.0
|
||||||
|
2018-02-07,198272,2.0,1.0,0.0
|
||||||
|
2018-02-08,195339,3.0,1.0,0.0
|
||||||
|
2018-02-09,183086,4.0,1.0,0.0
|
||||||
|
2018-02-10,122536,5.0,1.0,0.0
|
||||||
|
2018-02-11,133033,6.0,1.0,0.0
|
||||||
|
2018-02-12,185386,0.0,1.0,0.0
|
||||||
|
2018-02-13,184789,1.0,1.0,0.0
|
||||||
|
2018-02-14,176089,2.0,1.0,0.0
|
||||||
|
2018-02-15,171317,3.0,1.0,0.0
|
||||||
|
2018-02-16,162693,4.0,1.0,0.0
|
||||||
|
2018-02-17,116342,5.0,1.0,0.0
|
||||||
|
2018-02-18,122466,6.0,1.0,0.0
|
||||||
|
2018-02-19,172364,0.0,1.0,1.0
|
||||||
|
2018-02-20,185896,1.0,1.0,0.0
|
||||||
|
2018-02-21,188166,2.0,1.0,0.0
|
||||||
|
2018-02-22,189427,3.0,1.0,0.0
|
||||||
|
2018-02-23,178732,4.0,1.0,0.0
|
||||||
|
2018-02-24,132664,5.0,1.0,0.0
|
||||||
|
2018-02-25,134008,6.0,1.0,0.0
|
||||||
|
2018-02-26,200075,0.0,1.0,0.0
|
||||||
|
2018-02-27,207996,1.0,1.0,0.0
|
||||||
|
2018-02-28,204416,2.0,1.0,0.0
|
||||||
|
2018-03-01,201320,3.0,2.0,0.0
|
||||||
|
2018-03-02,188205,4.0,2.0,0.0
|
||||||
|
2018-03-03,131162,5.0,2.0,0.0
|
||||||
|
2018-03-04,138320,6.0,2.0,0.0
|
||||||
|
2018-03-05,207326,0.0,2.0,0.0
|
||||||
|
2018-03-06,212462,1.0,2.0,0.0
|
||||||
|
2018-03-07,209357,2.0,2.0,0.0
|
||||||
|
2018-03-08,194876,3.0,2.0,0.0
|
||||||
|
2018-03-09,193761,4.0,2.0,0.0
|
||||||
|
2018-03-10,133449,5.0,2.0,0.0
|
||||||
|
2018-03-11,142258,6.0,2.0,0.0
|
||||||
|
2018-03-12,208753,0.0,2.0,0.0
|
||||||
|
2018-03-13,210602,1.0,2.0,0.0
|
||||||
|
2018-03-14,214236,2.0,2.0,0.0
|
||||||
|
2018-03-15,210761,3.0,2.0,0.0
|
||||||
|
2018-03-16,196619,4.0,2.0,0.0
|
||||||
|
2018-03-17,133056,5.0,2.0,0.0
|
||||||
|
2018-03-18,141335,6.0,2.0,0.0
|
||||||
|
2018-03-19,211580,0.0,2.0,0.0
|
||||||
|
2018-03-20,219051,1.0,2.0,0.0
|
||||||
|
2018-03-21,215435,2.0,2.0,0.0
|
||||||
|
2018-03-22,211961,3.0,2.0,0.0
|
||||||
|
2018-03-23,196009,4.0,2.0,0.0
|
||||||
|
2018-03-24,132390,5.0,2.0,0.0
|
||||||
|
2018-03-25,140021,6.0,2.0,0.0
|
||||||
|
2018-03-26,205273,0.0,2.0,0.0
|
||||||
|
2018-03-27,212686,1.0,2.0,0.0
|
||||||
|
2018-03-28,210683,2.0,2.0,0.0
|
||||||
|
2018-03-29,189044,3.0,2.0,0.0
|
||||||
|
2018-03-30,170256,4.0,2.0,0.0
|
||||||
|
2018-03-31,125999,5.0,2.0,0.0
|
||||||
|
2018-04-01,126749,6.0,3.0,0.0
|
||||||
|
2018-04-02,186546,0.0,3.0,0.0
|
||||||
|
2018-04-03,207905,1.0,3.0,0.0
|
||||||
|
2018-04-04,201528,2.0,3.0,0.0
|
||||||
|
2018-04-05,188580,3.0,3.0,0.0
|
||||||
|
2018-04-06,173714,4.0,3.0,0.0
|
||||||
|
2018-04-07,125723,5.0,3.0,0.0
|
||||||
|
2018-04-08,142545,6.0,3.0,0.0
|
||||||
|
2018-04-09,204767,0.0,3.0,0.0
|
||||||
|
2018-04-10,212048,1.0,3.0,0.0
|
||||||
|
2018-04-11,210517,2.0,3.0,0.0
|
||||||
|
2018-04-12,206924,3.0,3.0,0.0
|
||||||
|
2018-04-13,191679,4.0,3.0,0.0
|
||||||
|
2018-04-14,126394,5.0,3.0,0.0
|
||||||
|
2018-04-15,137279,6.0,3.0,0.0
|
||||||
|
2018-04-16,208085,0.0,3.0,0.0
|
||||||
|
2018-04-17,213273,1.0,3.0,0.0
|
||||||
|
2018-04-18,211580,2.0,3.0,0.0
|
||||||
|
2018-04-19,206037,3.0,3.0,0.0
|
||||||
|
2018-04-20,191211,4.0,3.0,0.0
|
||||||
|
2018-04-21,125564,5.0,3.0,0.0
|
||||||
|
2018-04-22,136469,6.0,3.0,0.0
|
||||||
|
2018-04-23,206288,0.0,3.0,0.0
|
||||||
|
2018-04-24,212115,1.0,3.0,0.0
|
||||||
|
2018-04-25,207948,2.0,3.0,0.0
|
||||||
|
2018-04-26,205759,3.0,3.0,0.0
|
||||||
|
2018-04-27,181330,4.0,3.0,0.0
|
||||||
|
2018-04-28,130046,5.0,3.0,0.0
|
||||||
|
2018-04-29,120802,6.0,3.0,0.0
|
||||||
|
2018-04-30,170390,0.0,3.0,0.0
|
||||||
|
2018-05-01,169054,1.0,4.0,0.0
|
||||||
|
2018-05-02,197891,2.0,4.0,0.0
|
||||||
|
2018-05-03,199820,3.0,4.0,0.0
|
||||||
|
2018-05-04,186783,4.0,4.0,0.0
|
||||||
|
2018-05-05,124420,5.0,4.0,0.0
|
||||||
|
2018-05-06,130666,6.0,4.0,0.0
|
||||||
|
2018-05-07,196014,0.0,4.0,0.0
|
||||||
|
2018-05-08,203058,1.0,4.0,0.0
|
||||||
|
2018-05-09,198582,2.0,4.0,0.0
|
||||||
|
2018-05-10,191321,3.0,4.0,0.0
|
||||||
|
2018-05-11,183639,4.0,4.0,0.0
|
||||||
|
2018-05-12,122023,5.0,4.0,0.0
|
||||||
|
2018-05-13,128775,6.0,4.0,0.0
|
||||||
|
2018-05-14,199104,0.0,4.0,0.0
|
||||||
|
2018-05-15,200658,1.0,4.0,0.0
|
||||||
|
2018-05-16,201541,2.0,4.0,0.0
|
||||||
|
2018-05-17,196886,3.0,4.0,0.0
|
||||||
|
2018-05-18,188597,4.0,4.0,0.0
|
||||||
|
2018-05-19,121392,5.0,4.0,0.0
|
||||||
|
2018-05-20,126981,6.0,4.0,0.0
|
||||||
|
2018-05-21,189291,0.0,4.0,0.0
|
||||||
|
2018-05-22,203038,1.0,4.0,0.0
|
||||||
|
2018-05-23,205330,2.0,4.0,0.0
|
||||||
|
2018-05-24,199208,3.0,4.0,0.0
|
||||||
|
2018-05-25,187768,4.0,4.0,0.0
|
||||||
|
2018-05-26,117635,5.0,4.0,0.0
|
||||||
|
2018-05-27,124352,6.0,4.0,0.0
|
||||||
|
2018-05-28,180398,0.0,4.0,1.0
|
||||||
|
2018-05-29,194170,1.0,4.0,0.0
|
||||||
|
2018-05-30,200281,2.0,4.0,0.0
|
||||||
|
2018-05-31,197244,3.0,4.0,0.0
|
||||||
|
2018-06-01,184037,4.0,5.0,0.0
|
||||||
|
2018-06-02,121135,5.0,5.0,0.0
|
||||||
|
2018-06-03,129389,6.0,5.0,0.0
|
||||||
|
2018-06-04,200331,0.0,5.0,0.0
|
||||||
|
2018-06-05,207735,1.0,5.0,0.0
|
||||||
|
2018-06-06,203354,2.0,5.0,0.0
|
||||||
|
2018-06-07,200520,3.0,5.0,0.0
|
||||||
|
2018-06-08,182038,4.0,5.0,0.0
|
||||||
|
2018-06-09,120164,5.0,5.0,0.0
|
||||||
|
2018-06-10,125256,6.0,5.0,0.0
|
||||||
|
2018-06-11,194786,0.0,5.0,0.0
|
||||||
|
2018-06-12,200815,1.0,5.0,0.0
|
||||||
|
2018-06-13,197740,2.0,5.0,0.0
|
||||||
|
2018-06-14,192294,3.0,5.0,0.0
|
||||||
|
2018-06-15,173587,4.0,5.0,0.0
|
||||||
|
2018-06-16,105955,5.0,5.0,0.0
|
||||||
|
2018-06-17,110780,6.0,5.0,0.0
|
||||||
|
2018-06-18,174582,0.0,5.0,0.0
|
||||||
|
2018-06-19,193310,1.0,5.0,0.0
|
||||||
|
2018-06-20,193062,2.0,5.0,0.0
|
||||||
|
2018-06-21,187986,3.0,5.0,0.0
|
||||||
|
2018-06-22,173606,4.0,5.0,0.0
|
||||||
|
2018-06-23,111795,5.0,5.0,0.0
|
||||||
|
2018-06-24,116134,6.0,5.0,0.0
|
||||||
|
2018-06-25,185919,0.0,5.0,0.0
|
||||||
|
2018-06-26,193142,1.0,5.0,0.0
|
||||||
|
2018-06-27,188114,2.0,5.0,0.0
|
||||||
|
2018-06-28,183737,3.0,5.0,0.0
|
||||||
|
2018-06-29,171496,4.0,5.0,0.0
|
||||||
|
2018-06-30,107210,5.0,5.0,0.0
|
||||||
|
2018-07-01,111053,6.0,6.0,0.0
|
||||||
|
2018-07-02,176198,0.0,6.0,0.0
|
||||||
|
2018-07-03,184040,1.0,6.0,0.0
|
||||||
|
2018-07-04,169783,2.0,6.0,1.0
|
||||||
|
2018-07-05,177996,3.0,6.0,0.0
|
||||||
|
2018-07-06,167378,4.0,6.0,0.0
|
||||||
|
2018-07-07,106401,5.0,6.0,0.0
|
||||||
|
2018-07-08,112327,6.0,6.0,0.0
|
||||||
|
2018-07-09,182835,0.0,6.0,0.0
|
||||||
|
2018-07-10,187694,1.0,6.0,0.0
|
||||||
|
2018-07-11,185762,2.0,6.0,0.0
|
||||||
|
2018-07-12,184099,3.0,6.0,0.0
|
||||||
|
2018-07-13,170860,4.0,6.0,0.0
|
||||||
|
2018-07-14,106799,5.0,6.0,0.0
|
||||||
|
2018-07-15,108475,6.0,6.0,0.0
|
||||||
|
2018-07-16,175704,0.0,6.0,0.0
|
||||||
|
2018-07-17,183596,1.0,6.0,0.0
|
||||||
|
2018-07-18,179897,2.0,6.0,0.0
|
||||||
|
2018-07-19,183373,3.0,6.0,0.0
|
||||||
|
2018-07-20,169626,4.0,6.0,0.0
|
||||||
|
2018-07-21,106785,5.0,6.0,0.0
|
||||||
|
2018-07-22,112387,6.0,6.0,0.0
|
||||||
|
2018-07-23,180572,0.0,6.0,0.0
|
||||||
|
2018-07-24,186943,1.0,6.0,0.0
|
||||||
|
2018-07-25,185744,2.0,6.0,0.0
|
||||||
|
2018-07-26,183117,3.0,6.0,0.0
|
||||||
|
2018-07-27,168526,4.0,6.0,0.0
|
||||||
|
2018-07-28,105936,5.0,6.0,0.0
|
||||||
|
2018-07-29,111708,6.0,6.0,0.0
|
||||||
|
2018-07-30,179950,0.0,6.0,0.0
|
||||||
|
2018-07-31,185930,1.0,6.0,0.0
|
||||||
|
2018-08-01,183366,2.0,7.0,0.0
|
||||||
|
2018-08-02,182412,3.0,7.0,0.0
|
||||||
|
2018-08-03,173429,4.0,7.0,0.0
|
||||||
|
2018-08-04,106108,5.0,7.0,0.0
|
||||||
|
2018-08-05,110059,6.0,7.0,0.0
|
||||||
|
2018-08-06,178355,0.0,7.0,0.0
|
||||||
|
2018-08-07,185518,1.0,7.0,0.0
|
||||||
|
2018-08-08,183204,2.0,7.0,0.0
|
||||||
|
2018-08-09,181276,3.0,7.0,0.0
|
||||||
|
2018-08-10,168297,4.0,7.0,0.0
|
||||||
|
2018-08-11,106488,5.0,7.0,0.0
|
||||||
|
2018-08-12,111786,6.0,7.0,0.0
|
||||||
|
2018-08-13,178620,0.0,7.0,0.0
|
||||||
|
2018-08-14,181922,1.0,7.0,0.0
|
||||||
|
2018-08-15,172198,2.0,7.0,0.0
|
||||||
|
2018-08-16,177367,3.0,7.0,0.0
|
||||||
|
2018-08-17,166550,4.0,7.0,0.0
|
||||||
|
2018-08-18,107011,5.0,7.0,0.0
|
||||||
|
2018-08-19,112299,6.0,7.0,0.0
|
||||||
|
2018-08-20,176718,0.0,7.0,0.0
|
||||||
|
2018-08-21,182562,1.0,7.0,0.0
|
||||||
|
2018-08-22,181484,2.0,7.0,0.0
|
||||||
|
2018-08-23,180317,3.0,7.0,0.0
|
||||||
|
2018-08-24,170197,4.0,7.0,0.0
|
||||||
|
2018-08-25,109383,5.0,7.0,0.0
|
||||||
|
2018-08-26,113373,6.0,7.0,0.0
|
||||||
|
2018-08-27,180142,0.0,7.0,0.0
|
||||||
|
2018-08-28,191628,1.0,7.0,0.0
|
||||||
|
2018-08-29,191149,2.0,7.0,0.0
|
||||||
|
2018-08-30,187503,3.0,7.0,0.0
|
||||||
|
2018-08-31,172280,4.0,7.0,0.0
|
||||||
|
@@ -0,0 +1,176 @@
|
|||||||
|
import pandas as pd
|
||||||
|
from azureml.core import Environment
|
||||||
|
from azureml.core.conda_dependencies import CondaDependencies
|
||||||
|
from azureml.train.estimator import Estimator
|
||||||
|
from azureml.core.run import Run
|
||||||
|
from azureml.automl.core.shared import constants
|
||||||
|
|
||||||
|
|
||||||
|
def split_fraction_by_grain(df, fraction, time_column_name, grain_column_names=None):
|
||||||
|
if not grain_column_names:
|
||||||
|
df["tmp_grain_column"] = "grain"
|
||||||
|
grain_column_names = ["tmp_grain_column"]
|
||||||
|
|
||||||
|
"""Group df by grain and split on last n rows for each group."""
|
||||||
|
df_grouped = df.sort_values(time_column_name).groupby(
|
||||||
|
grain_column_names, group_keys=False
|
||||||
|
)
|
||||||
|
|
||||||
|
df_head = df_grouped.apply(
|
||||||
|
lambda dfg: dfg.iloc[: -int(len(dfg) * fraction)] if fraction > 0 else dfg
|
||||||
|
)
|
||||||
|
|
||||||
|
df_tail = df_grouped.apply(
|
||||||
|
lambda dfg: dfg.iloc[-int(len(dfg) * fraction) :] if fraction > 0 else dfg[:0]
|
||||||
|
)
|
||||||
|
|
||||||
|
if "tmp_grain_column" in grain_column_names:
|
||||||
|
for df2 in (df, df_head, df_tail):
|
||||||
|
df2.drop("tmp_grain_column", axis=1, inplace=True)
|
||||||
|
|
||||||
|
grain_column_names.remove("tmp_grain_column")
|
||||||
|
|
||||||
|
return df_head, df_tail
|
||||||
|
|
||||||
|
|
||||||
|
def split_full_for_forecasting(
|
||||||
|
df, time_column_name, grain_column_names=None, test_split=0.2
|
||||||
|
):
|
||||||
|
index_name = df.index.name
|
||||||
|
|
||||||
|
# Assumes that there isn't already a column called tmpindex
|
||||||
|
|
||||||
|
df["tmpindex"] = df.index
|
||||||
|
|
||||||
|
train_df, test_df = split_fraction_by_grain(
|
||||||
|
df, test_split, time_column_name, grain_column_names
|
||||||
|
)
|
||||||
|
|
||||||
|
train_df = train_df.set_index("tmpindex")
|
||||||
|
train_df.index.name = index_name
|
||||||
|
|
||||||
|
test_df = test_df.set_index("tmpindex")
|
||||||
|
test_df.index.name = index_name
|
||||||
|
|
||||||
|
df.drop("tmpindex", axis=1, inplace=True)
|
||||||
|
|
||||||
|
return train_df, test_df
|
||||||
|
|
||||||
|
|
||||||
|
def get_result_df(remote_run):
|
||||||
|
children = list(remote_run.get_children(recursive=True))
|
||||||
|
summary_df = pd.DataFrame(
|
||||||
|
index=["run_id", "run_algorithm", "primary_metric", "Score"]
|
||||||
|
)
|
||||||
|
goal_minimize = False
|
||||||
|
for run in children:
|
||||||
|
if (
|
||||||
|
run.get_status().lower() == constants.RunState.COMPLETE_RUN
|
||||||
|
and "run_algorithm" in run.properties
|
||||||
|
and "score" in run.properties
|
||||||
|
):
|
||||||
|
# We only count in the completed child runs.
|
||||||
|
summary_df[run.id] = [
|
||||||
|
run.id,
|
||||||
|
run.properties["run_algorithm"],
|
||||||
|
run.properties["primary_metric"],
|
||||||
|
float(run.properties["score"]),
|
||||||
|
]
|
||||||
|
if "goal" in run.properties:
|
||||||
|
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
|
||||||
|
|
||||||
|
summary_df = summary_df.T.sort_values("Score", ascending=goal_minimize)
|
||||||
|
summary_df = summary_df.set_index("run_algorithm")
|
||||||
|
return summary_df
|
||||||
|
|
||||||
|
|
||||||
|
def run_inference(
|
||||||
|
test_experiment,
|
||||||
|
compute_target,
|
||||||
|
script_folder,
|
||||||
|
train_run,
|
||||||
|
test_dataset,
|
||||||
|
lookback_dataset,
|
||||||
|
max_horizon,
|
||||||
|
target_column_name,
|
||||||
|
time_column_name,
|
||||||
|
freq,
|
||||||
|
):
|
||||||
|
model_base_name = "model.pkl"
|
||||||
|
if "model_data_location" in train_run.properties:
|
||||||
|
model_location = train_run.properties["model_data_location"]
|
||||||
|
_, model_base_name = model_location.rsplit("/", 1)
|
||||||
|
train_run.download_file(
|
||||||
|
"outputs/{}".format(model_base_name), "inference/{}".format(model_base_name)
|
||||||
|
)
|
||||||
|
|
||||||
|
inference_env = train_run.get_environment()
|
||||||
|
|
||||||
|
est = Estimator(
|
||||||
|
source_directory=script_folder,
|
||||||
|
entry_script="infer.py",
|
||||||
|
script_params={
|
||||||
|
"--max_horizon": max_horizon,
|
||||||
|
"--target_column_name": target_column_name,
|
||||||
|
"--time_column_name": time_column_name,
|
||||||
|
"--frequency": freq,
|
||||||
|
"--model_path": model_base_name,
|
||||||
|
},
|
||||||
|
inputs=[
|
||||||
|
test_dataset.as_named_input("test_data"),
|
||||||
|
lookback_dataset.as_named_input("lookback_data"),
|
||||||
|
],
|
||||||
|
compute_target=compute_target,
|
||||||
|
environment_definition=inference_env,
|
||||||
|
)
|
||||||
|
|
||||||
|
run = test_experiment.submit(
|
||||||
|
est,
|
||||||
|
tags={
|
||||||
|
"training_run_id": train_run.id,
|
||||||
|
"run_algorithm": train_run.properties["run_algorithm"],
|
||||||
|
"valid_score": train_run.properties["score"],
|
||||||
|
"primary_metric": train_run.properties["primary_metric"],
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
run.log("run_algorithm", run.tags["run_algorithm"])
|
||||||
|
return run
|
||||||
|
|
||||||
|
|
||||||
|
def run_multiple_inferences(
    summary_df,
    train_experiment,
    test_experiment,
    compute_target,
    script_folder,
    test_dataset,
    lookback_dataset,
    max_horizon,
    target_column_name,
    time_column_name,
    freq,
):
    """Launch one inference run per training run listed in ``summary_df``.

    Each row of ``summary_df`` must carry a ``run_id`` column; the matching
    row gains a ``test_run_id`` column pointing at the submitted inference
    run. Returns the updated ``summary_df``.
    """
    for name, row in summary_df.iterrows():
        print(name)
        print(row)
        training_run_id = row.run_id
        # Rehydrate the training run so its model and metadata are available.
        train_run = Run(train_experiment, training_run_id)

        submitted = run_inference(
            test_experiment,
            compute_target,
            script_folder,
            train_run,
            test_dataset,
            lookback_dataset,
            max_horizon,
            target_column_name,
            time_column_name,
            freq,
        )

        print(submitted)
        # Record the inference run id back on the matching summary row.
        summary_df.loc[summary_df.run_id == training_run_id, "test_run_id"] = submitted.id

    return summary_df
|
||||||
@@ -0,0 +1,145 @@
|
|||||||
|
import argparse
|
||||||
|
import os
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
|
||||||
|
import joblib
|
||||||
|
from sklearn.metrics import mean_absolute_error, mean_squared_error
|
||||||
|
|
||||||
|
from azureml.automl.runtime.shared.score import scoring, constants
|
||||||
|
from azureml.core import Run
|
||||||
|
|
||||||
|
# torch is only required for ".pt" (forecasting TCN) models; record whether it
# imported so the model-loading code below can assert on availability instead
# of failing at import time on sklearn-only environments.
try:
    import torch

    _torch_present = True
except ImportError:
    _torch_present = False
|
||||||
|
|
||||||
|
|
||||||
|
def map_location_cuda(storage, loc):
    """``map_location`` callback for ``torch.load`` that moves deserialized
    storages onto the GPU (used when CUDA is available)."""
    return storage.cuda()
|
||||||
|
|
||||||
|
|
||||||
|
def APE(actual, pred):
    """
    Calculate absolute percentage error.
    Returns a vector of APE values with same length as actual/pred.
    """
    relative_error = (actual - pred) / actual
    return np.abs(relative_error) * 100


def MAPE(actual, pred):
    """
    Calculate mean absolute percentage error.
    Entries where either series is NaN, or where ``actual`` is numerically
    zero, are dropped before averaging so the ratio stays well defined.
    """
    usable = ~np.isnan(actual) & ~np.isnan(pred) & ~np.isclose(actual, 0.0)
    return np.mean(APE(actual[usable], pred[usable]))
|
||||||
|
|
||||||
|
|
||||||
|
# Command-line interface for the remote inference script. The arguments are
# supplied by the training-side helper via the Estimator's script_params.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--max_horizon",
    type=int,
    dest="max_horizon",
    default=10,
    help="Max Horizon for forecasting",
)
parser.add_argument(
    "--target_column_name",
    type=str,
    dest="target_column_name",
    help="Target Column Name",
)
parser.add_argument(
    "--time_column_name", type=str, dest="time_column_name", help="Time Column Name"
)
parser.add_argument(
    "--frequency", type=str, dest="freq", help="Frequency of prediction"
)
parser.add_argument(
    "--model_path",
    type=str,
    dest="model_path",
    default="model.pkl",
    help="Filename of model to be loaded",
)

args = parser.parse_args()
max_horizon = args.max_horizon
target_column_name = args.target_column_name
time_column_name = args.time_column_name
freq = args.freq
model_path = args.model_path

# Echo the parsed arguments into the run's stdout log for debugging.
print("args passed are: ")
print(max_horizon)
print(target_column_name)
print(time_column_name)
print(freq)
print(model_path)
|
||||||
|
|
||||||
|
# Attach to the ambient AzureML run context so metrics can be logged to it.
run = Run.get_context()
# get input dataset by name
test_dataset = run.input_datasets["test_data"]

# NOTE(review): unused in this script as written — presumably a placeholder
# for time-series grain columns; confirm before removing.
grain_column_names = []

df = test_dataset.to_pandas_dataframe()

print("Read df")
print(df)

# Split features from target. X_test_df aliases df, and pop() removes the
# target column from it in place, so X_test_df ends up target-free.
X_test_df = df
y_test = df.pop(target_column_name).to_numpy()
|
||||||
|
|
||||||
|
# Pick the loader by file extension: ".pt" models are torch-serialized
# forecasting TCNs; anything else is treated as an sklearn pipeline pickle.
_, ext = os.path.splitext(model_path)
if ext == ".pt":
    # Load the fc-tcn torch model.
    assert _torch_present
    # Map tensors onto the GPU when available, otherwise onto the CPU.
    if torch.cuda.is_available():
        map_location = map_location_cuda
    else:
        map_location = "cpu"
    with open(model_path, "rb") as fh:
        fitted_model = torch.load(fh, map_location=map_location)
else:
    # Load the sklearn pipeline.
    fitted_model = joblib.load(model_path)
|
||||||
|
|
||||||
|
# Produce rolling-origin forecasts over the test set, advancing one period at
# a time so each prediction only uses actuals up to its forecast origin.
X_rf = fitted_model.rolling_forecast(X_test_df, y_test, step=1)
# Rename the model's output columns to stable, human-friendly names used by
# the metric computations below.
assign_dict = {
    fitted_model.forecast_origin_column_name: "forecast_origin",
    fitted_model.forecast_column_name: "predicted",
    fitted_model.actual_column_name: target_column_name,
}
X_rf.rename(columns=assign_dict, inplace=True)

print(X_rf.head())

# Use the AutoML scoring module
regression_metrics = list(constants.REGRESSION_SCALAR_SET)
y_test = np.array(X_rf[target_column_name])
y_pred = np.array(X_rf["predicted"])
scores = scoring.score_regression(y_test, y_pred, regression_metrics)

print("scores:")
print(scores)

# Log every AutoML regression metric to the run.
for key, value in scores.items():
    run.log(key, value)

# Also report simple sklearn-based metrics for a quick sanity check.
print("Simple forecasting model")
rmse = np.sqrt(mean_squared_error(X_rf[target_column_name], X_rf["predicted"]))
print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
mae = mean_absolute_error(X_rf[target_column_name], X_rf["predicted"])
print("mean_absolute_error score: %.2f" % mae)
print("MAPE: %.2f" % MAPE(X_rf[target_column_name], X_rf["predicted"]))

run.log("rmse", rmse)
run.log("mae", mae)
|
||||||
@@ -0,0 +1,681 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<font color=\"red\" size=\"5\"><strong>!Important!</strong> </br>This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/pipelines/1k_demand_forecasting_with_pipeline_components/automl-forecasting-demand-hierarchical-timeseries-in-pipeline)).</font>"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Hierarchical Time Series - Automated ML\n",
|
||||||
|
"**_Generate hierarchical time series forecasts with Automated Machine Learning_**\n",
|
||||||
|
"\n",
|
||||||
|
"---"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"For this notebook we are using a synthetic dataset portraying sales data to predict the quantity of a variety of product skus across several states, stores, and product categories.\n",
|
||||||
|
"\n",
|
||||||
|
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Prerequisites\n",
|
||||||
|
"You'll need to create a compute Instance by following [these](https://learn.microsoft.com/en-us/azure/machine-learning/v1/how-to-create-manage-compute-instance?tabs=python) instructions."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 1.0 Set up workspace, datastore, experiment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1613003526897
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core import Workspace, Datastore\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"\n",
|
||||||
|
"# Set up your workspace\n",
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"ws.get_details()\n",
|
||||||
|
"\n",
|
||||||
|
"# Set up your datastores\n",
|
||||||
|
"dstore = ws.get_default_datastore()\n",
|
||||||
|
"\n",
|
||||||
|
"output = {}\n",
|
||||||
|
"output[\"SDK version\"] = azureml.core.VERSION\n",
|
||||||
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
|
"output[\"Workspace\"] = ws.name\n",
|
||||||
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
|
"output[\"Location\"] = ws.location\n",
|
||||||
|
"output[\"Default datastore name\"] = dstore.name\n",
|
||||||
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
|
"outputDf.T"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Choose an experiment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1613003540729
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core import Experiment\n",
|
||||||
|
"\n",
|
||||||
|
"experiment = Experiment(ws, \"automl-hts\")\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"Experiment name: \" + experiment.name)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 2.0 Data\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"nteract": {
|
||||||
|
"transient": {
|
||||||
|
"deleting": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"source": [
|
||||||
|
"### Upload local csv files to datastore\n",
|
||||||
|
"You can upload your train and inference csv files to the default datastore in your workspace. \n",
|
||||||
|
"\n",
|
||||||
|
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
|
||||||
|
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore.datastore?view=azure-ml-py) documentation on how to access data from Datastore."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"datastore_path = \"hts-sample\""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"datastore = ws.get_default_datastore()\n",
|
||||||
|
"datastore"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Create the TabularDatasets \n",
|
||||||
|
"\n",
|
||||||
|
"Datasets in Azure Machine Learning are references to specific data in a Datastore. The data can be retrieved as a [TabularDatasets](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py). We will read in the data as a pandas DataFrame, upload to the data store and register them to your Workspace using ```register_pandas_dataframe``` so they can be called as an input into the training pipeline. We will use the inference dataset as part of the forecasting pipeline. The step need only be completed once."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1613007017296
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||||
|
"\n",
|
||||||
|
"registered_train = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||||
|
" pd.read_csv(\"Data/hts-sample-train.csv\"),\n",
|
||||||
|
" target=(datastore, \"hts-sample\"),\n",
|
||||||
|
" name=\"hts-sales-train\",\n",
|
||||||
|
")\n",
|
||||||
|
"registered_inference = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||||
|
" pd.read_csv(\"Data/hts-sample-test.csv\"),\n",
|
||||||
|
" target=(datastore, \"hts-sample\"),\n",
|
||||||
|
" name=\"hts-sales-test\",\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 3.0 Build the training pipeline\n",
|
||||||
|
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Choose a compute target\n",
|
||||||
|
"\n",
|
||||||
|
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
|
||||||
|
"\n",
|
||||||
|
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1613007037308
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
|
"\n",
|
||||||
|
"# Name your cluster\n",
|
||||||
|
"compute_name = \"hts-compute\"\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"if compute_name in ws.compute_targets:\n",
|
||||||
|
" compute_target = ws.compute_targets[compute_name]\n",
|
||||||
|
" if compute_target and type(compute_target) is AmlCompute:\n",
|
||||||
|
" print(\"Found compute target: \" + compute_name)\n",
|
||||||
|
"else:\n",
|
||||||
|
" print(\"Creating a new compute target...\")\n",
|
||||||
|
" provisioning_config = AmlCompute.provisioning_configuration(\n",
|
||||||
|
" vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n",
|
||||||
|
" )\n",
|
||||||
|
" # Create the compute target\n",
|
||||||
|
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
|
||||||
|
"\n",
|
||||||
|
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
|
||||||
|
" # If no min node count is provided it will use the scale settings for the cluster\n",
|
||||||
|
" compute_target.wait_for_completion(\n",
|
||||||
|
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
|
||||||
|
" )\n",
|
||||||
|
"\n",
|
||||||
|
" # For a more detailed view of current cluster status, use the 'status' property\n",
|
||||||
|
" print(compute_target.status.serialize())"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Set up training parameters\n",
|
||||||
|
"\n",
|
||||||
|
"We need to provide ``ForecastingParameters``, ``AutoMLConfig`` and ``HTSTrainParameters`` objects. For the forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, the hierarchy definition, and the level of the hierarchy at which to train.\n",
|
||||||
|
"\n",
|
||||||
|
"#### ``ForecastingParameters`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
||||||
|
"| **time_column_name** | The name of your time column. |\n",
|
||||||
|
"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
|
||||||
|
"| **cv_step_size** | Number of periods between two consecutive cross-validation folds. The default value is \\\"auto\\\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value. |\n",
|
||||||
|
"\n",
|
||||||
|
"#### ``AutoMLConfig`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **task** | forecasting |\n",
|
||||||
|
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
|
||||||
|
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
|
||||||
|
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||||
|
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||||
|
"| **experiment_timeout_hours** | Maximum amount of time in hours that each experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. **It does not control the overall timeout for the pipeline run, instead controls the timeout for each training run per partitioned time series.** |\n",
|
||||||
|
"| **label_column_name** | The name of the label column. |\n",
|
||||||
|
"| **n_cross_validations** | Number of cross validation splits. The default value is \\\"auto\\\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
||||||
|
"| **enable_early_stopping** | Flag to enable early termination if the primary metric is no longer improving. |\n",
|
||||||
|
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
|
||||||
|
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
|
||||||
|
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
|
||||||
|
"| **model_explainability** | Flag to disable explaining the best automated ML model at the end of all training iterations. The default is True and will block non-explainable models which may impact the forecast accuracy. For more information, see [Interpretability: model explanations in automated machine learning](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-machine-learning-interpretability-automl). |\n",
|
||||||
|
"\n",
|
||||||
|
"#### ``HTSTrainParameters`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **automl_settings** | The ``AutoMLConfig`` object defined above. |\n",
|
||||||
|
"| **hierarchy_column_names** | The names of columns that define the hierarchical structure of the data from highest level to most granular. |\n",
|
||||||
|
"| **training_level** | The level of the hierarchy to be used for training models. |\n",
|
||||||
|
"| **enable_engineered_explanations** | The switch controls engineered explanations. |"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1613007061544
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.train.automl.runtime._hts.hts_parameters import HTSTrainParameters\n",
|
||||||
|
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||||
|
"from azureml.train.automl.automlconfig import AutoMLConfig\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"model_explainability = True\n",
|
||||||
|
"\n",
|
||||||
|
"engineered_explanations = False\n",
|
||||||
|
"# Define your hierarchy. Adjust the settings below based on your dataset.\n",
|
||||||
|
"hierarchy = [\"state\", \"store_id\", \"product_category\", \"SKU\"]\n",
|
||||||
|
"training_level = \"SKU\"\n",
|
||||||
|
"\n",
|
||||||
|
"# Set your forecast parameters. Adjust the settings below based on your dataset.\n",
|
||||||
|
"time_column_name = \"date\"\n",
|
||||||
|
"label_column_name = \"quantity\"\n",
|
||||||
|
"forecast_horizon = 7\n",
|
||||||
|
"\n",
|
||||||
|
"forecasting_parameters = ForecastingParameters(\n",
|
||||||
|
" time_column_name=time_column_name,\n",
|
||||||
|
" forecast_horizon=forecast_horizon,\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"automl_settings = AutoMLConfig(\n",
|
||||||
|
" task=\"forecasting\",\n",
|
||||||
|
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||||
|
" experiment_timeout_hours=1,\n",
|
||||||
|
" label_column_name=label_column_name,\n",
|
||||||
|
" track_child_runs=False,\n",
|
||||||
|
" forecasting_parameters=forecasting_parameters,\n",
|
||||||
|
" pipeline_fetch_max_batch_size=15,\n",
|
||||||
|
" model_explainability=model_explainability,\n",
|
||||||
|
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||||
|
" cv_step_size=\"auto\",\n",
|
||||||
|
" # The following settings are specific to this sample and should be adjusted according to your own needs.\n",
|
||||||
|
" iteration_timeout_minutes=10,\n",
|
||||||
|
" iterations=15,\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"hts_parameters = HTSTrainParameters(\n",
|
||||||
|
" automl_settings=automl_settings,\n",
|
||||||
|
" hierarchy_column_names=hierarchy,\n",
|
||||||
|
" training_level=training_level,\n",
|
||||||
|
" enable_engineered_explanations=engineered_explanations,\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Set up hierarchy training pipeline"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The ``process_count_per_node`` is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
|
||||||
|
"\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **experiment** | The experiment used for training. |\n",
|
||||||
|
"| **train_data** | The file dataset to be used as input to the training run. |\n",
|
||||||
|
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
|
||||||
|
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node for optimal performance. |\n",
|
||||||
|
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
|
||||||
|
"| **run_invocation_timeout** | Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. This must be greater than ``experiment_timeout_hours`` by at least 300 seconds. |\n",
|
||||||
|
"\n",
|
||||||
|
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.\n",
|
||||||
|
"\n",
|
||||||
|
"**Note**: Total time taken for the **training step** in the pipeline to complete = $ \\frac{t}{ p \\times n } \\times ts $\n",
|
||||||
|
"where,\n",
|
||||||
|
"- $ t $ is time taken for training one partition (can be viewed in the training logs)\n",
|
||||||
|
"- $ p $ is ``process_count_per_node``\n",
|
||||||
|
"- $ n $ is ``node_count``\n",
|
||||||
|
"- $ ts $ is total number of partitions in time series based on ``partition_column_names``"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
|
||||||
|
" experiment=experiment,\n",
|
||||||
|
" train_data=registered_train,\n",
|
||||||
|
" compute_target=compute_target,\n",
|
||||||
|
" node_count=2,\n",
|
||||||
|
" process_count_per_node=8,\n",
|
||||||
|
" train_pipeline_parameters=hts_parameters,\n",
|
||||||
|
" run_invocation_timeout=3900,\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.pipeline.core import Pipeline\n",
|
||||||
|
"\n",
|
||||||
|
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Submit the pipeline to run\n",
|
||||||
|
"Next we submit our pipeline to run. The whole training pipeline takes about 1h using a Standard_D16_V3 VM with our current ParallelRunConfig setting."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"training_run = experiment.submit(training_pipeline)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"training_run.wait_for_completion(show_output=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Check the run status, if training_run is in completed state, continue to forecasting. If training_run is in another state, check the portal for failures."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### [Optional] Get the explanations\n",
|
||||||
|
"First we need to download the explanations to the local disk."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"if model_explainability:\n",
|
||||||
|
" expl_output = training_run.get_pipeline_output(\"explanations\")\n",
|
||||||
|
" expl_output.download(\"training_explanations\")\n",
|
||||||
|
"else:\n",
|
||||||
|
" print(\n",
|
||||||
|
" \"Model explanations are available only if model_explainability is set to True.\"\n",
|
||||||
|
" )"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The explanations are downloaded to the \"training_explanations/azureml\" directory."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os\n",
|
||||||
|
"\n",
|
||||||
|
"if model_explainability:\n",
|
||||||
|
" explanations_dirrectory = os.listdir(\n",
|
||||||
|
" os.path.join(\"training_explanations\", \"azureml\")\n",
|
||||||
|
" )\n",
|
||||||
|
" if len(explanations_dirrectory) > 1:\n",
|
||||||
|
" print(\n",
|
||||||
|
" \"Warning! The directory contains multiple explanations, only the first one will be displayed.\"\n",
|
||||||
|
" )\n",
|
||||||
|
" print(\"The explanations are located at {}.\".format(explanations_dirrectory[0]))\n",
|
||||||
|
" # Now we will list all the explanations.\n",
|
||||||
|
" explanation_path = os.path.join(\n",
|
||||||
|
" \"training_explanations\",\n",
|
||||||
|
" \"azureml\",\n",
|
||||||
|
" explanations_dirrectory[0],\n",
|
||||||
|
" \"training_explanations\",\n",
|
||||||
|
" )\n",
|
||||||
|
" print(\"Available explanations\")\n",
|
||||||
|
" print(\"==============================\")\n",
|
||||||
|
" print(\"\\n\".join(os.listdir(explanation_path)))\n",
|
||||||
|
"else:\n",
|
||||||
|
" print(\n",
|
||||||
|
" \"Model explanations are available only if model_explainability is set to True.\"\n",
|
||||||
|
" )"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"View the explanations on \"state\" level."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from IPython.display import display\n",
|
||||||
|
"\n",
|
||||||
|
"explanation_type = \"raw\"\n",
|
||||||
|
"level = \"state\"\n",
|
||||||
|
"\n",
|
||||||
|
"if model_explainability:\n",
|
||||||
|
" display(\n",
|
||||||
|
" pd.read_csv(\n",
|
||||||
|
" os.path.join(explanation_path, \"{}_explanations_{}.csv\").format(\n",
|
||||||
|
" explanation_type, level\n",
|
||||||
|
" )\n",
|
||||||
|
" )\n",
|
||||||
|
" )"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 5.0 Forecasting\n",
|
||||||
|
"For hierarchical forecasting we need to provide the HTSInferenceParameters object.\n",
|
||||||
|
"#### ``HTSInferenceParameters`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **hierarchy_forecast_level:** | The default level of the hierarchy to produce prediction/forecast on. |\n",
|
||||||
|
"| **allocation_method:** | \\[Optional] The disaggregation method to use if the hierarchy forecast level specified is below the define hierarchy training level. <br><i>(average historical proportions) 'average_historical_proportions'</i><br><i>(proportions of the historical averages) 'proportions_of_historical_average'</i> |\n",
|
||||||
|
"\n",
|
||||||
|
"#### ``get_many_models_batch_inference_steps`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **experiment** | The experiment used for inference run. |\n",
|
||||||
|
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
|
||||||
|
"| **compute_target** | The compute target that runs the inference pipeline. |\n",
|
||||||
|
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
|
||||||
|
"| **process_count_per_node** | \\[Optional] The number of processes per node. By default it's 2 (should be at most half of the number of cores in a single node of the compute cluster that will be used for the experiment).\n",
|
||||||
|
"| **inference_pipeline_parameters** | \\[Optional] The ``HTSInferenceParameters`` object defined above. |\n",
|
||||||
|
"| **train_run_id** | \\[Optional] The run id of the **training pipeline**. By default it is the latest successful training pipeline run in the experiment. |\n",
|
||||||
|
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
|
||||||
|
"| **run_invocation_timeout** | \\[Optional] Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. |"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.train.automl.runtime._hts.hts_parameters import HTSInferenceParameters\n",
|
||||||
|
"\n",
|
||||||
|
"inference_parameters = HTSInferenceParameters(\n",
|
||||||
|
" hierarchy_forecast_level=\"store_id\", # The setting is specific to this dataset and should be changed based on your dataset.\n",
|
||||||
|
" allocation_method=\"proportions_of_historical_average\",\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
|
||||||
|
" experiment=experiment,\n",
|
||||||
|
" inference_data=registered_inference,\n",
|
||||||
|
" compute_target=compute_target,\n",
|
||||||
|
" inference_pipeline_parameters=inference_parameters,\n",
|
||||||
|
" node_count=2,\n",
|
||||||
|
" process_count_per_node=8,\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.pipeline.core import Pipeline\n",
|
||||||
|
"\n",
|
||||||
|
"inference_pipeline = Pipeline(ws, steps=steps)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"inference_run = experiment.submit(inference_pipeline)\n",
|
||||||
|
"inference_run.wait_for_completion(show_output=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Retrieve results\n",
|
||||||
|
"\n",
|
||||||
|
"Forecast results can be retrieved through the following code. The prediction results summary and the actual predictions are downloaded in forecast_results folder"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"forecasts = inference_run.get_pipeline_output(\"forecasts\")\n",
|
||||||
|
"forecasts.download(\"forecast_results\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Resbumit the Pipeline\n",
|
||||||
|
"\n",
|
||||||
|
"The inference pipeline can be submitted with different configurations."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"inference_run = experiment.submit(\n",
|
||||||
|
" inference_pipeline, pipeline_parameters={\"hierarchy_forecast_level\": \"state\"}\n",
|
||||||
|
")\n",
|
||||||
|
"inference_run.wait_for_completion(show_output=False)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "jialiu"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"categories": [
|
||||||
|
"how-to-use-azureml",
|
||||||
|
"automated-machine-learning"
|
||||||
|
],
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.8 - AzureML",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python38-azureml"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.8.10"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 4
|
||||||
|
}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: auto-ml-forecasting-hierarchical-timeseries
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -0,0 +1,122 @@
|
|||||||
|
---
|
||||||
|
page_type: sample
|
||||||
|
languages:
|
||||||
|
- python
|
||||||
|
products:
|
||||||
|
- azure-machine-learning
|
||||||
|
description: Tutorial showing how to solve a complex machine learning time series forecasting problems at scale by using Azure Automated ML and Many Models solution accelerator.
|
||||||
|
---
|
||||||
|
|
||||||
|

|
||||||
|
# Many Models Solution Accelerator
|
||||||
|
|
||||||
|
<!--
|
||||||
|
Guidelines on README format: https://review.docs.microsoft.com/help/onboard/admin/samples/concepts/readme-template?branch=master
|
||||||
|
|
||||||
|
Guidance on onboarding samples to docs.microsoft.com/samples: https://review.docs.microsoft.com/help/onboard/admin/samples/process/onboarding?branch=master
|
||||||
|
|
||||||
|
Taxonomies for products and languages: https://review.docs.microsoft.com/new-hope/information-architecture/metadata/taxonomies?branch=master
|
||||||
|
-->
|
||||||
|
|
||||||
|
In the real world, many problems can be too complex to be solved by a single machine learning model. Whether that be predicting sales for each individual store, building a predictive maintenance model for hundreds of oil wells, or tailoring an experience to individual users, building a model for each instance can lead to improved results on many machine learning problems.
|
||||||
|
|
||||||
|
This Pattern is very common across a wide variety of industries and applicable to many real world use cases. Below are some examples we have seen where this pattern is being used.
|
||||||
|
|
||||||
|
- Energy and utility companies building predictive maintenance models for thousands of oil wells, hundreds of wind turbines or hundreds of smart meters
|
||||||
|
|
||||||
|
- Retail organizations building workforce optimization models for thousands of stores, campaign promotion propensity models, Price optimization models for hundreds of thousands of products they sell
|
||||||
|
|
||||||
|
- Restaurant chains building demand forecasting models across thousands of restaurants
|
||||||
|
|
||||||
|
- Banks and financial institutes building models for cash replenishment for ATM Machine and for several ATMs or building personalized models for individuals
|
||||||
|
|
||||||
|
- Enterprises building revenue forecasting models at each division level
|
||||||
|
|
||||||
|
- Document management companies building text analytics and legal document search models per each state
|
||||||
|
|
||||||
|
Azure Machine Learning (AML) makes it easy to train, operate, and manage hundreds or even thousands of models. This repo will walk you through the end to end process of creating a many models solution from training to scoring to monitoring.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
To use this solution accelerator, all you need is access to an [Azure subscription](https://azure.microsoft.com/free/) and an [Azure Machine Learning Workspace](https://docs.microsoft.com/azure/machine-learning/how-to-manage-workspace) that you'll create below.
|
||||||
|
|
||||||
|
While it's not required, a basic understanding of Azure Machine Learning will be helpful for understanding the solution. The following resources can help introduce you to AML:
|
||||||
|
|
||||||
|
1. [Azure Machine Learning Overview](https://azure.microsoft.com/services/machine-learning/)
|
||||||
|
2. [Azure Machine Learning Tutorials](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup)
|
||||||
|
3. [Azure Machine Learning Sample Notebooks on Github](https://github.com/Azure/azureml-examples)
|
||||||
|
|
||||||
|
## Getting started
|
||||||
|
|
||||||
|
### 1. Deploy Resources
|
||||||
|
|
||||||
|
Start by deploying the resources to Azure. The button below will deploy Azure Machine Learning and its related resources:
|
||||||
|
|
||||||
|
<a href="https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2Fmicrosoft%2Fsolution-accelerator-many-models%2Fmaster%2Fazuredeploy.json" target="_blank">
|
||||||
|
<img src="http://azuredeploy.net/deploybutton.png"/>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
### 2. Configure Development Environment
|
||||||
|
|
||||||
|
Next you'll need to configure your [development environment](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment) for Azure Machine Learning. We recommend using a [Compute Instance](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment#compute-instance) as it's the fastest way to get up and running.
|
||||||
|
|
||||||
|
### 3. Run Notebooks
|
||||||
|
|
||||||
|
Once your development environment is set up, run through the Jupyter Notebooks sequentially following the steps outlined. By the end, you'll know how to train, score, and make predictions using the many models pattern on Azure Machine Learning.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
## Contents
|
||||||
|
|
||||||
|
In this repo, you'll train and score a forecasting model for each orange juice brand and for each store at a (simulated) grocery chain. By the end, you'll have forecasted sales by using up to 11,973 models to predict sales for the next few weeks.
|
||||||
|
|
||||||
|
The data used in this sample is simulated based on the [Dominick's Orange Juice Dataset](http://www.cs.unitn.it/~taufer/QMMA/L10-OJ-Data.html#(1)), sales data from a Chicago area grocery store.
|
||||||
|
|
||||||
|
<img src="images/Flow_map.png" width="1000">
|
||||||
|
|
||||||
|
### Using Automated ML to train the models:
|
||||||
|
|
||||||
|
The [`auto-ml-forecasting-many-models.ipynb`](./auto-ml-forecasting-many-models.ipynb) notebook is a guided solution accelerator that demonstrates steps from data preparation, to model training, and forecasting on trained models as well as operationalizing the solution.
|
||||||
|
|
||||||
|
## How-to-videos
|
||||||
|
|
||||||
|
Watch these how-to-videos for a step by step walk-through of the many model solution accelerator to learn how to setup your models using Automated ML.
|
||||||
|
|
||||||
|
### Automated ML
|
||||||
|
|
||||||
|
[](https://channel9.msdn.com/Shows/Docs-AI/Building-Large-Scale-Machine-Learning-Forecasting-Models-using-Azure-Machine-Learnings-Automated-ML)
|
||||||
|
|
||||||
|
## Key concepts
|
||||||
|
|
||||||
|
### ParallelRunStep
|
||||||
|
|
||||||
|
[ParallelRunStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.parallel_run_step.parallelrunstep?view=azure-ml-py) enables the parallel training of models and is commonly used for batch inferencing. This [document](https://docs.microsoft.com/azure/machine-learning/how-to-use-parallel-run-step) walks through some of the key concepts around ParallelRunStep.
|
||||||
|
|
||||||
|
### Pipelines
|
||||||
|
|
||||||
|
[Pipelines](https://docs.microsoft.com/azure/machine-learning/concept-ml-pipelines) allow you to create workflows in your machine learning projects. These workflows have a number of benefits including speed, simplicity, repeatability, and modularity.
|
||||||
|
|
||||||
|
### Automated Machine Learning
|
||||||
|
|
||||||
|
[Automated Machine Learning](https://docs.microsoft.com/azure/machine-learning/concept-automated-ml) also referred to as automated ML or AutoML, is the process of automating the time consuming, iterative tasks of machine learning model development. It allows data scientists, analysts, and developers to build ML models with high scale, efficiency, and productivity all while sustaining model quality.
|
||||||
|
|
||||||
|
### Other Concepts
|
||||||
|
|
||||||
|
In additional to ParallelRunStep, Pipelines and Automated Machine Learning, you'll also be working with the following concepts including [workspace](https://docs.microsoft.com/azure/machine-learning/concept-workspace), [datasets](https://docs.microsoft.com/azure/machine-learning/concept-data#datasets), [compute targets](https://docs.microsoft.com/azure/machine-learning/concept-compute-target#train), [python script steps](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), and [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/).
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
This project welcomes contributions and suggestions. To learn more visit the [contributing](../../../CONTRIBUTING.md) section.
|
||||||
|
|
||||||
|
Most contributions require you to agree to a Contributor License Agreement (CLA)
|
||||||
|
declaring that you have the right to, and actually do, grant us
|
||||||
|
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
|
||||||
|
|
||||||
|
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
|
||||||
|
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
|
||||||
|
provided by the bot. You will only need to do this once across all repos using our CLA.
|
||||||
|
|
||||||
|
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
||||||
|
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
|
||||||
|
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
|
||||||
@@ -0,0 +1,898 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<font color=\"red\" size=\"5\"><strong>!Important!</strong> </br>This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/pipelines/1k_demand_forecasting_with_pipeline_components/automl-forecasting-demand-many-models-in-pipeline)).</font>"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Many Models - Automated ML\n",
|
||||||
|
"**_Generate many models time series forecasts with Automated Machine Learning_**\n",
|
||||||
|
"\n",
|
||||||
|
"---"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"For this notebook we are using a synthetic dataset portraying sales data to predict the the quantity of a vartiety of product skus across several states, stores, and product categories.\n",
|
||||||
|
"\n",
|
||||||
|
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Prerequisites\n",
|
||||||
|
"You'll need to create a compute Instance by following [these](https://learn.microsoft.com/en-us/azure/machine-learning/v1/how-to-create-manage-compute-instance?tabs=python) instructions."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 1.0 Set up workspace, datastore, experiment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1613003526897
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core import Workspace, Datastore\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"\n",
|
||||||
|
"# Set up your workspace\n",
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"ws.get_details()\n",
|
||||||
|
"\n",
|
||||||
|
"# Set up your datastores\n",
|
||||||
|
"dstore = ws.get_default_datastore()\n",
|
||||||
|
"\n",
|
||||||
|
"output = {}\n",
|
||||||
|
"output[\"SDK version\"] = azureml.core.VERSION\n",
|
||||||
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
|
"output[\"Workspace\"] = ws.name\n",
|
||||||
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
|
"output[\"Location\"] = ws.location\n",
|
||||||
|
"output[\"Default datastore name\"] = dstore.name\n",
|
||||||
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
|
"outputDf.T"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Choose an experiment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1613003540729
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core import Experiment\n",
|
||||||
|
"\n",
|
||||||
|
"experiment = Experiment(ws, \"automl-many-models\")\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"Experiment name: \" + experiment.name)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 2.0 Data\n",
|
||||||
|
"\n",
|
||||||
|
"This notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. \n",
|
||||||
|
"\n",
|
||||||
|
"The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each thus allowing 11,973 models to be trained to showcase the power of the many models pattern.\n",
|
||||||
|
"\n",
|
||||||
|
" \n",
|
||||||
|
"In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:\n",
|
||||||
|
"\n",
|
||||||
|
"1. Registering the blob container as a Datastore to the Workspace\n",
|
||||||
|
"2. Registering a tabular dataset to the Workspace"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"nteract": {
|
||||||
|
"transient": {
|
||||||
|
"deleting": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"source": [
|
||||||
|
"### 2.1 Data Preparation\n",
|
||||||
|
"The OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on time column ('WeekStarting') before and after '1992-5-28' .\n",
|
||||||
|
"\n",
|
||||||
|
"The container has\n",
|
||||||
|
"<ol>\n",
|
||||||
|
" <li><b>'oj-data-tabular'</b> and <b>'oj-inference-tabular'</b> folders that contains training and inference data respectively for the 11,973 models. </li>\n",
|
||||||
|
" <li>It also has <b>'oj-data-small-tabular'</b> and <b>'oj-inference-small-tabular'</b> folders that has training and inference data for 10 models.</li>\n",
|
||||||
|
"</ol>\n",
|
||||||
|
"\n",
|
||||||
|
"To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"nteract": {
|
||||||
|
"transient": {
|
||||||
|
"deleting": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"source": [
|
||||||
|
"<b> To use your own data, put your own data in a blobstore folder. As shown it can be one file or multiple files. We can then register datastore using that blob as shown below.\n",
|
||||||
|
" \n",
|
||||||
|
"<h3> How sample data in blob store looks like</h3>\n",
|
||||||
|
"\n",
|
||||||
|
"['oj-data-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)</b>\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"['oj-inference-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"['oj-data-small-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"['oj-inference-small-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### 2.2 Register the blob container as DataStore\n",
|
||||||
|
"\n",
|
||||||
|
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
|
||||||
|
"\n",
|
||||||
|
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.\n",
|
||||||
|
"\n",
|
||||||
|
"In this next step, we will be registering blob storage as datastore to the Workspace."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core import Datastore\n",
|
||||||
|
"\n",
|
||||||
|
"# Please change the following to point to your own blob container and pass in account_key\n",
|
||||||
|
"blob_datastore_name = \"automl_many_models\"\n",
|
||||||
|
"container_name = \"automl-sample-notebook-data\"\n",
|
||||||
|
"account_name = \"automlsamplenotebookdata\"\n",
|
||||||
|
"\n",
|
||||||
|
"oj_datastore = Datastore.register_azure_blob_container(\n",
|
||||||
|
" workspace=ws,\n",
|
||||||
|
" datastore_name=blob_datastore_name,\n",
|
||||||
|
" container_name=container_name,\n",
|
||||||
|
" account_name=account_name,\n",
|
||||||
|
" create_if_not_exists=True,\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### 2.3 Using tabular datasets \n",
|
||||||
|
"\n",
|
||||||
|
"Now that the datastore is available from the Workspace, [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset, so that users who have their data which can be in one or many files (*.parquet or *.csv) and have not split up data according to group columns needed for training, can do so using out of box support for 'partiion_by' feature of TabularDataset shown in section 5.0 below."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1613007017296
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core import Dataset\n",
|
||||||
|
"\n",
|
||||||
|
"ds_name_small = \"oj-data-small-tabular\"\n",
|
||||||
|
"input_ds_small = Dataset.Tabular.from_delimited_files(\n",
|
||||||
|
" path=oj_datastore.path(ds_name_small + \"/\"), validate=False\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"inference_name_small = \"oj-inference-small-tabular\"\n",
|
||||||
|
"inference_ds_small = Dataset.Tabular.from_delimited_files(\n",
|
||||||
|
" path=oj_datastore.path(inference_name_small + \"/\"), validate=False\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### 2.4 Configure data with ``OutputFileDatasetConfig`` objects\n",
|
||||||
|
"This step shows how to configure output data from a pipeline step. One of the use cases for this step is when you want to do some preprocessing before feeding the data to training step. Intermediate data (or output of a step) is represented by an ``OutputFileDatasetConfig`` object. ``output_data`` is produced as the output of a step. Optionally, this data can be registered as a dataset by calling the ``register_on_complete`` method. If you create an ``OutputFileDatasetConfig`` in one step and use it as an input to another step, that data dependency between steps creates an implicit execution order in the pipeline.\n",
|
||||||
|
"\n",
|
||||||
|
"``OutputFileDatasetConfig`` objects return a directory, and by default write output to the default datastore of the workspace.\n",
|
||||||
|
"\n",
|
||||||
|
"Since instance creation for class ``OutputTabularDatasetConfig`` is not allowed, we first create an instance of this class. Then we use the ``read_parquet_files`` method to read the parquet file into ``OutputTabularDatasetConfig``."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.data.output_dataset_config import OutputFileDatasetConfig\n",
|
||||||
|
"\n",
|
||||||
|
"output_data = OutputFileDatasetConfig(\n",
|
||||||
|
" name=\"processed_data\", destination=(dstore, \"outputdataset/{run-id}/{output-name}\")\n",
|
||||||
|
").as_upload()\n",
|
||||||
|
"# output_data_dataset = output_data.register_on_complete(\n",
|
||||||
|
"# name='processed_data', description = 'files from prev step')\n",
|
||||||
|
"output_data = output_data.read_parquet_files()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 3.0 Build the training pipeline\n",
|
||||||
|
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Choose a compute target\n",
|
||||||
|
"\n",
|
||||||
|
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
|
||||||
|
"\n",
|
||||||
|
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1613007037308
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
|
"\n",
|
||||||
|
"# Name your cluster\n",
|
||||||
|
"compute_name = \"mm-compute-v1\"\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"if compute_name in ws.compute_targets:\n",
|
||||||
|
" compute_target = ws.compute_targets[compute_name]\n",
|
||||||
|
" if compute_target and type(compute_target) is AmlCompute:\n",
|
||||||
|
" print(\"Found compute target: \" + compute_name)\n",
|
||||||
|
"else:\n",
|
||||||
|
" print(\"Creating a new compute target...\")\n",
|
||||||
|
" provisioning_config = AmlCompute.provisioning_configuration(\n",
|
||||||
|
" vm_size=\"STANDARD_D14_V2\", max_nodes=20\n",
|
||||||
|
" )\n",
|
||||||
|
" # Create the compute target\n",
|
||||||
|
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
|
||||||
|
"\n",
|
||||||
|
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
|
||||||
|
" # If no min node count is provided it will use the scale settings for the cluster\n",
|
||||||
|
" compute_target.wait_for_completion(\n",
|
||||||
|
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
|
||||||
|
" )\n",
|
||||||
|
"\n",
|
||||||
|
" # For a more detailed view of current cluster status, use the 'status' property\n",
|
||||||
|
" print(compute_target.status.serialize())"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Configure the training run's environment\n",
|
||||||
|
"The next step is making sure that the remote training run has all the dependencies needed by the training steps. Dependencies and the runtime context are set by creating and configuring a RunConfiguration object.\n",
|
||||||
|
"\n",
|
||||||
|
"The code below shows two options for handling dependencies. As presented, with ``USE_CURATED_ENV = True``, the configuration is based on a [curated environment](https://docs.microsoft.com/en-us/azure/machine-learning/resource-curated-environments). Curated environments have prebuilt Docker images in the [Microsoft Container Registry](https://hub.docker.com/publishers/microsoftowner). For more information, see [Azure Machine Learning curated environments](https://docs.microsoft.com/en-us/azure/machine-learning/resource-curated-environments).\n",
|
||||||
|
"\n",
|
||||||
|
"The path taken if you change ``USE_CURATED_ENV`` to False shows the pattern for explicitly setting your dependencies. In that scenario, a new custom Docker image will be created and registered in an Azure Container Registry within your resource group (see [Introduction to private Docker container registries in Azure](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-intro)). Building and registering this image can take quite a few minutes."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.runconfig import RunConfiguration\n",
|
||||||
|
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||||
|
"from azureml.core import Environment\n",
|
||||||
|
"\n",
|
||||||
|
"aml_run_config = RunConfiguration()\n",
|
||||||
|
"aml_run_config.target = compute_target\n",
|
||||||
|
"\n",
|
||||||
|
"USE_CURATED_ENV = True\n",
|
||||||
|
"if USE_CURATED_ENV:\n",
|
||||||
|
" curated_environment = Environment.get(\n",
|
||||||
|
" workspace=ws, name=\"AzureML-sklearn-0.24-ubuntu18.04-py37-cpu\"\n",
|
||||||
|
" )\n",
|
||||||
|
" aml_run_config.environment = curated_environment\n",
|
||||||
|
"else:\n",
|
||||||
|
" aml_run_config.environment.python.user_managed_dependencies = False\n",
|
||||||
|
"\n",
|
||||||
|
" # Add some packages relied on by data prep step\n",
|
||||||
|
" aml_run_config.environment.python.conda_dependencies = CondaDependencies.create(\n",
|
||||||
|
" conda_packages=[\"pandas\", \"scikit-learn\"],\n",
|
||||||
|
" pip_packages=[\"azureml-sdk\", \"azureml-dataset-runtime[fuse,pandas]\"],\n",
|
||||||
|
" pin_sdk_version=False,\n",
|
||||||
|
" )"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Set up training parameters\n",
|
||||||
|
"\n",
|
||||||
|
"We need to provide ``ForecastingParameters``, ``AutoMLConfig`` and ``ManyModelsTrainParameters`` objects. For the forecasting task we also need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name(s) definition.\n",
|
||||||
|
"\n",
|
||||||
|
"#### ``ForecastingParameters`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
||||||
|
"| **time_column_name** | The name of your time column. |\n",
|
||||||
|
"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
|
||||||
|
"| **cv_step_size** | Number of periods between two consecutive cross-validation folds. The default value is \\\"auto\\\", in which case AutoML determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value. |\n",
|
||||||
|
"\n",
|
||||||
|
"#### ``AutoMLConfig`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **task** | forecasting |\n",
|
||||||
|
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
|
||||||
|
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
|
||||||
|
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||||
|
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||||
|
"| **experiment_timeout_hours** | Maximum amount of time in hours that each experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. **It does not control the overall timeout for the pipeline run, instead controls the timeout for each training run per partitioned time series.** |\n",
|
||||||
|
"| **label_column_name** | The name of the label column. |\n",
|
||||||
|
"| **n_cross_validations** | Number of cross validation splits. The default value is \\\"auto\\\", in which case AutoML determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
||||||
|
"| **enable_early_stopping** | Flag to enable early termination if the primary metric is no longer improving. |\n",
|
||||||
|
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
|
||||||
|
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
|
||||||
|
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"#### ``ManyModelsTrainParameters`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **automl_settings** | The ``AutoMLConfig`` object defined above. |\n",
|
||||||
|
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"gather": {
|
||||||
|
"logged": 1613007061544
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
|
||||||
|
" ManyModelsTrainParameters,\n",
|
||||||
|
")\n",
|
||||||
|
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||||
|
"from azureml.train.automl.automlconfig import AutoMLConfig\n",
|
||||||
|
"\n",
|
||||||
|
"partition_column_names = [\"Store\", \"Brand\"]\n",
|
||||||
|
"\n",
|
||||||
|
"forecasting_parameters = ForecastingParameters(\n",
|
||||||
|
" time_column_name=\"WeekStarting\",\n",
|
||||||
|
" forecast_horizon=6,\n",
|
||||||
|
" time_series_id_column_names=partition_column_names,\n",
|
||||||
|
" cv_step_size=\"auto\",\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"automl_settings = AutoMLConfig(\n",
|
||||||
|
" task=\"forecasting\",\n",
|
||||||
|
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||||
|
" iteration_timeout_minutes=10,\n",
|
||||||
|
" iterations=15,\n",
|
||||||
|
" experiment_timeout_hours=0.25,\n",
|
||||||
|
" label_column_name=\"Quantity\",\n",
|
||||||
|
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||||
|
" track_child_runs=False,\n",
|
||||||
|
" forecasting_parameters=forecasting_parameters,\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"mm_paramters = ManyModelsTrainParameters(\n",
|
||||||
|
" automl_settings=automl_settings, partition_column_names=partition_column_names\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Construct your pipeline steps\n",
|
||||||
|
"Once you have the compute resource and environment created, you're ready to define your pipeline's steps. There are many built-in steps available via the Azure Machine Learning SDK, as you can see on the [reference documentation for the azureml.pipeline.steps package](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps?view=azure-ml-py). The most flexible class is [PythonScriptStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), which runs a Python script.\n",
|
||||||
|
"\n",
|
||||||
|
"Your data preparation code is in a subdirectory (in this example, \"data_preprocessing_tabular.py\" in the directory \"./scripts\"). As part of the pipeline creation process, this directory is zipped and uploaded to the compute_target and the step runs the script specified as the value for ``script_name``.\n",
|
||||||
|
"\n",
|
||||||
|
"The ``arguments`` values specify the inputs and outputs of the step. In the example below, the baseline data is the ``input_ds_small`` dataset. The script data_preprocessing_tabular.py does whatever data-transformation tasks are appropriate to the task at hand and outputs the data to ``output_data``, of type ``OutputFileDatasetConfig``. For more information, see [Moving data into and between ML pipeline steps (Python)](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-move-data-in-out-of-pipelines). The step will run on the machine defined by ``compute_target``, using the configuration ``aml_run_config``.\n",
|
||||||
|
"\n",
|
||||||
|
"Reuse of previous results (``allow_reuse``) is key when using pipelines in a collaborative environment since eliminating unnecessary reruns offers agility. Reuse is the default behavior when the ``script_name``, ``inputs``, and the parameters of a step remain the same. When reuse is allowed, results from the previous run are immediately sent to the next step. If ``allow_reuse`` is set to False, a new run will always be generated for this step during pipeline execution.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that we only support partitioned FileDataset and TabularDataset without partition when using such output as input.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that we **drop column** \"Revenue\" from the dataset in this step to avoid information leak as \"Quantity\" = \"Revenue\" / \"Price\". **Please modify the logic based on your data**."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.pipeline.steps import PythonScriptStep\n",
|
||||||
|
"\n",
|
||||||
|
"dataprep_source_dir = \"./scripts\"\n",
|
||||||
|
"entry_point = \"data_preprocessing_tabular.py\"\n",
|
||||||
|
"ds_input = input_ds_small.as_named_input(\"train_10_models\")\n",
|
||||||
|
"\n",
|
||||||
|
"data_prep_step = PythonScriptStep(\n",
|
||||||
|
" script_name=entry_point,\n",
|
||||||
|
" source_directory=dataprep_source_dir,\n",
|
||||||
|
" arguments=[\"--input\", ds_input, \"--output\", output_data],\n",
|
||||||
|
" compute_target=compute_target,\n",
|
||||||
|
" runconfig=aml_run_config,\n",
|
||||||
|
" allow_reuse=False,\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"input_ds_small = output_data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Set up many models pipeline"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The ``process_count_per_node`` is based on the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
|
||||||
|
"\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **experiment** | The experiment used for training. |\n",
|
||||||
|
"| **train_data** | The file dataset to be used as input to the training run. |\n",
|
||||||
|
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
|
||||||
|
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node for optimal performance. |\n",
|
||||||
|
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
|
||||||
|
"| **run_invocation_timeout** | Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. This must be greater than ``experiment_timeout_hours`` by at least 300 seconds. |\n",
|
||||||
|
"\n",
|
||||||
|
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.\n",
|
||||||
|
"\n",
|
||||||
|
"**Note**: Total time taken for the **training step** in the pipeline to complete = $ \\frac{t}{ p \\times n } \\times ts $\n",
|
||||||
|
"where,\n",
|
||||||
|
"- $ t $ is time taken for training one partition (can be viewed in the training logs)\n",
|
||||||
|
"- $ p $ is ``process_count_per_node``\n",
|
||||||
|
"- $ n $ is ``node_count``\n",
|
||||||
|
"- $ ts $ is total number of partitions in time series based on ``partition_column_names``"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
|
||||||
|
" experiment=experiment,\n",
|
||||||
|
" train_data=input_ds_small,\n",
|
||||||
|
" compute_target=compute_target,\n",
|
||||||
|
" node_count=2,\n",
|
||||||
|
" process_count_per_node=8,\n",
|
||||||
|
" run_invocation_timeout=1200,\n",
|
||||||
|
" train_pipeline_parameters=mm_paramters,\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.pipeline.core import Pipeline\n",
|
||||||
|
"\n",
|
||||||
|
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Submit the pipeline to run\n",
|
||||||
|
"Next we submit our pipeline to run. The whole training pipeline takes about 40m using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"training_run = experiment.submit(training_pipeline)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"training_run.wait_for_completion(show_output=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Check the run status, if training_run is in completed state, continue to forecasting. If training_run is in another state, check the portal for failures."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 5.0 Publish and schedule the train pipeline (Optional)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### 5.1 Publish the pipeline\n",
|
||||||
|
"\n",
|
||||||
|
"Once you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',\n",
|
||||||
|
"# description = 'train many models',\n",
|
||||||
|
"# version = '1',\n",
|
||||||
|
"# continue_on_step_failure = False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### 5.2 Schedule the pipeline\n",
|
||||||
|
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
|
||||||
|
"\n",
|
||||||
|
"# training_pipeline_id = published_pipeline.id\n",
|
||||||
|
"\n",
|
||||||
|
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
|
||||||
|
"# recurring_schedule = Schedule.create(ws, name=\"automl_training_recurring_schedule\",\n",
|
||||||
|
"# description=\"Schedule Training Pipeline to run on the first day of every month\",\n",
|
||||||
|
"# pipeline_id=training_pipeline_id,\n",
|
||||||
|
"# experiment_name=experiment.name,\n",
|
||||||
|
"# recurrence=recurrence)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 6.0 Forecasting"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Set up output dataset for inference data\n",
|
||||||
|
"Output of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset. "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.data import OutputFileDatasetConfig\n",
|
||||||
|
"\n",
|
||||||
|
"output_inference_data_ds = OutputFileDatasetConfig(\n",
|
||||||
|
" name=\"many_models_inference_output\", destination=(dstore, \"oj/inference_data/\")\n",
|
||||||
|
").register_on_complete(name=\"oj_inference_data_ds\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"For many models we need to provide the ManyModelsInferenceParameters object.\n",
|
||||||
|
"\n",
|
||||||
|
"#### ``ManyModelsInferenceParameters`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **partition_column_names** | List of column names that identifies groups. |\n",
|
||||||
|
"| **target_column_name** | \\[Optional] Column name only if the inference dataset has the target. |\n",
|
||||||
|
"| **time_column_name** | \\[Optional] Time column name only if it is timeseries. |\n",
|
||||||
|
"| **inference_type** | \\[Optional] Which inference method to use on the model. Possible values are 'forecast', 'predict_proba', and 'predict'. |\n",
|
||||||
|
"| **forecast_mode** | \\[Optional] The type of forecast to be used, either 'rolling' or 'recursive'; defaults to 'recursive'. |\n",
|
||||||
|
"| **step** | \\[Optional] Number of periods to advance the forecasting window in each iteration **(for rolling forecast only)**; defaults to 1. |\n",
|
||||||
|
"\n",
|
||||||
|
"#### ``get_many_models_batch_inference_steps`` arguments\n",
|
||||||
|
"| Property | Description|\n",
|
||||||
|
"| :--------------- | :------------------- |\n",
|
||||||
|
"| **experiment** | The experiment used for inference run. |\n",
|
||||||
|
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
|
||||||
|
"| **compute_target** | The compute target that runs the inference pipeline. |\n",
|
||||||
|
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
|
||||||
|
"| **process_count_per_node** | \\[Optional] The number of processes per node. By default it's 2 (should be at most half of the number of cores in a single node of the compute cluster that will be used for the experiment).\n",
|
||||||
|
"| **inference_pipeline_parameters** | \\[Optional] The ``ManyModelsInferenceParameters`` object defined above. |\n",
|
||||||
|
"| **append_row_file_name** | \\[Optional] The name of the output file (optional, default value is 'parallel_run_step.txt'). Supports 'txt' and 'csv' file extension. A 'txt' file extension generates the output in 'txt' format with space as separator without column names. A 'csv' file extension generates the output in 'csv' format with comma as separator and with column names. |\n",
|
||||||
|
"| **train_run_id** | \\[Optional] The run id of the **training pipeline**. By default it is the latest successful training pipeline run in the experiment. |\n",
|
||||||
|
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiment as the inference pipeline. |\n",
|
||||||
|
"| **run_invocation_timeout** | \\[Optional] Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||||
|
"| **output_datastore** | \\[Optional] The ``Datastore`` or ``OutputDatasetConfig`` to be used for output. If specified any pipeline output will be written to that location. If unspecified the default datastore will be used. |\n",
|
||||||
|
"| **arguments** | \\[Optional] Arguments to be passed to inference script. Possible argument is '--forecast_quantiles' followed by quantile values. |"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||||
|
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
|
||||||
|
" ManyModelsInferenceParameters,\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"mm_parameters = ManyModelsInferenceParameters(\n",
|
||||||
|
" partition_column_names=[\"Store\", \"Brand\"],\n",
|
||||||
|
" time_column_name=\"WeekStarting\",\n",
|
||||||
|
" target_column_name=\"Quantity\",\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"output_file_name = \"parallel_run_step.csv\"\n",
|
||||||
|
"\n",
|
||||||
|
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
|
||||||
|
" experiment=experiment,\n",
|
||||||
|
" inference_data=inference_ds_small,\n",
|
||||||
|
" node_count=2,\n",
|
||||||
|
" process_count_per_node=8,\n",
|
||||||
|
" compute_target=compute_target,\n",
|
||||||
|
" run_invocation_timeout=300,\n",
|
||||||
|
" output_datastore=output_inference_data_ds,\n",
|
||||||
|
" train_run_id=training_run.id,\n",
|
||||||
|
" train_experiment_name=training_run.experiment.name,\n",
|
||||||
|
" inference_pipeline_parameters=mm_parameters,\n",
|
||||||
|
" append_row_file_name=output_file_name,\n",
|
||||||
|
" arguments=[\"--forecast_quantiles\", 0.1, 0.9],\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.pipeline.core import Pipeline\n",
|
||||||
|
"\n",
|
||||||
|
"inference_pipeline = Pipeline(ws, steps=inference_steps)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"inference_run = experiment.submit(inference_pipeline)\n",
|
||||||
|
"inference_run.wait_for_completion(show_output=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Retrieve results\n",
|
||||||
|
"\n",
|
||||||
|
"The forecasting pipeline forecasts the orange juice quantity for a Store by Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container is listed in 'forecasting_output.txt' under Outputs+logs. \n",
|
||||||
|
"\n",
|
||||||
|
"The following code snippet:\n",
|
||||||
|
"1. Downloads the contents of the output folder that is passed in the parallel run step \n",
|
||||||
|
"2. Reads the output file that has the predictions as pandas dataframe and \n",
|
||||||
|
"3. Displays the top 10 rows of the predictions"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
|
||||||
|
"\n",
|
||||||
|
"forecasting_results_name = \"forecasting_results\"\n",
|
||||||
|
"forecasting_output_name = \"many_models_inference_output\"\n",
|
||||||
|
"forecast_file = get_output_from_mm_pipeline(\n",
|
||||||
|
" inference_run, forecasting_results_name, forecasting_output_name, output_file_name\n",
|
||||||
|
")\n",
|
||||||
|
"df = pd.read_csv(forecast_file)\n",
|
||||||
|
"print(\n",
|
||||||
|
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
|
||||||
|
")\n",
|
||||||
|
"df.head(10)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## 7.0 Publish and schedule the inference pipeline (Optional)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### 7.1 Publish the pipeline\n",
|
||||||
|
"\n",
|
||||||
|
"Once you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',\n",
|
||||||
|
"# description = 'forecast many models',\n",
|
||||||
|
"# version = '1',\n",
|
||||||
|
"# continue_on_step_failure = False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### 7.2 Schedule the pipeline\n",
|
||||||
|
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
|
||||||
|
"\n",
|
||||||
|
"# forecasting_pipeline_id = published_pipeline.id\n",
|
||||||
|
"\n",
|
||||||
|
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
|
||||||
|
"# recurring_schedule = Schedule.create(ws, name=\"automl_forecasting_recurring_schedule\",\n",
|
||||||
|
"# description=\"Schedule Forecasting Pipeline to run on the first day of every week\",\n",
|
||||||
|
"# pipeline_id=forecasting_pipeline_id,\n",
|
||||||
|
"# experiment_name=experiment.name,\n",
|
||||||
|
"# recurrence=recurrence)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "jialiu"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"categories": [
|
||||||
|
"how-to-use-azureml",
|
||||||
|
"automated-machine-learning"
|
||||||
|
],
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.8 - AzureML",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python38-azureml"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.8.10"
|
||||||
|
},
|
||||||
|
"vscode": {
|
||||||
|
"interpreter": {
|
||||||
|
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 4
|
||||||
|
}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: auto-ml-forecasting-many-models
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
|
After Width: | Height: | Size: 32 KiB |
|
After Width: | Height: | Size: 306 KiB |
|
After Width: | Height: | Size: 2.6 MiB |
|
After Width: | Height: | Size: 106 KiB |
|
After Width: | Height: | Size: 158 KiB |
|
After Width: | Height: | Size: 80 KiB |
|
After Width: | Height: | Size: 68 KiB |
|
After Width: | Height: | Size: 631 KiB |
|
After Width: | Height: | Size: 176 KiB |
|
After Width: | Height: | Size: 165 KiB |
|
After Width: | Height: | Size: 162 KiB |
|
After Width: | Height: | Size: 166 KiB |
@@ -0,0 +1,39 @@
|
|||||||
|
from pathlib import Path
|
||||||
|
from azureml.core import Run
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import os
|
||||||
|
|
||||||
|
|
||||||
|
def main(args):
    """Copy every file from the mounted input dataset folder to the output folder.

    The input directory is resolved from the run context's ``train_10_models``
    input dataset (a mounted file dataset — TODO confirm mount vs. download mode).
    Each regular file is read as text and written unchanged under ``args.output``;
    insert any per-file pre-processing between the read and the write.

    Args:
        args: Parsed command-line namespace; only ``args.output`` is used here
            (``args.input`` is declared by the parser but the input path comes
            from the run context instead).
    """
    output = Path(args.output)
    output.mkdir(parents=True, exist_ok=True)

    run_context = Run.get_context()
    input_path = Path(run_context.input_datasets["train_10_models"])

    for entry in input_path.iterdir():
        # Skip sub-directories and other non-files: the previous
        # os.listdir() loop would crash calling open() on a directory.
        if not entry.is_file():
            continue

        content = entry.read_text()

        # Apply any data pre-processing techniques here

        (output / entry.name).write_text(content)
|
||||||
|
|
||||||
|
|
||||||
|
def my_parse_args():
    """Parse the command-line arguments for this pipeline step.

    Returns:
        argparse.Namespace with string attributes ``input`` and ``output``.
    """
    arg_parser = argparse.ArgumentParser("Test")
    arg_parser.add_argument("--input", type=str)
    arg_parser.add_argument("--output", type=str)
    return arg_parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Script entry point: parse the CLI arguments, then run the step.
    main(my_parse_args())
|
||||||
@@ -0,0 +1,37 @@
|
|||||||
|
from pathlib import Path
|
||||||
|
from azureml.core import Run
|
||||||
|
import argparse
|
||||||
|
|
||||||
|
|
||||||
|
def main(args):
    """Prepare the training data and write it to a single parquet file.

    Loads the ``train_10_models`` input dataset from the run context as a
    pandas DataFrame, removes the leaky ``Revenue`` column if present, and
    writes the result to ``<args.output>/data_prepared_result.parquet``.

    Args:
        args: Parsed command-line namespace; only ``args.output`` is used.
    """
    destination = Path(args.output)
    destination.mkdir(parents=True, exist_ok=True)

    run = Run.get_context()
    train_dataset = run.input_datasets["train_10_models"]
    frame = train_dataset.to_pandas_dataframe()

    # Drop the column "Revenue" from the dataset to avoid information leak as
    # "Quantity" = "Revenue" / "Price". Please modify the logic based on your data.
    leaky_column = "Revenue"
    if leaky_column in frame.columns:
        frame = frame.drop(columns=[leaky_column])

    # Apply any data pre-processing techniques here

    frame.to_parquet(destination / "data_prepared_result.parquet", compression=None)
|
||||||
|
|
||||||
|
|
||||||
|
def my_parse_args():
    """Build the step's argument parser and parse the current command line.

    Returns:
        argparse.Namespace carrying the ``input`` and ``output`` strings.
    """
    cli = argparse.ArgumentParser("Test")
    for flag in ("--input", "--output"):
        cli.add_argument(flag, type=str)
    parsed = cli.parse_args()
    return parsed
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Entry point when executed as a pipeline step script.
    parsed_args = my_parse_args()
    main(parsed_args)
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-contrib-automl-pipeline-steps
|
||||||
@@ -20,27 +20,34 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"# Automated Machine Learning\n",
|
"<font color=\"red\" size=\"5\"><strong>!Important!</strong> </br>This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-orange-juice-sales)).</font>"
|
||||||
"_**Orange Juice Sales Forecasting**_\n",
|
|
||||||
"\n",
|
|
||||||
"## Contents\n",
|
|
||||||
"1. [Introduction](#Introduction)\n",
|
|
||||||
"1. [Setup](#Setup)\n",
|
|
||||||
"1. [Compute](#Compute)\n",
|
|
||||||
"1. [Data](#Data)\n",
|
|
||||||
"1. [Train](#Train)\n",
|
|
||||||
"1. [Predict](#Predict)\n",
|
|
||||||
"1. [Operationalize](#Operationalize)"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## Introduction\n",
|
"# Automated Machine Learning\n",
|
||||||
|
"_**Orange Juice Sales Forecasting**_\n",
|
||||||
|
"\n",
|
||||||
|
"## Contents\n",
|
||||||
|
"1. [Introduction](#introduction)\n",
|
||||||
|
"1. [Setup](#setup)\n",
|
||||||
|
"1. [Compute](#compute)\n",
|
||||||
|
"1. [Data](#data)\n",
|
||||||
|
"1. [Train](#train)\n",
|
||||||
|
"1. [Forecast](#forecast)\n",
|
||||||
|
"1. [Operationalize](#operationalize)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Introduction<a id=\"introduction\"></a>\n",
|
||||||
"In this example, we use AutoML to train, select, and operationalize a time-series forecasting model for multiple time-series.\n",
|
"In this example, we use AutoML to train, select, and operationalize a time-series forecasting model for multiple time-series.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Make sure you have executed the [configuration notebook](../../../configuration.ipynb) before running this notebook.\n",
|
"Make sure you have executed the [configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) before running this notebook.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"The examples in the following code samples use the University of Chicago's Dominick's Finer Foods dataset to forecast orange juice sales. Dominick's was a grocery chain in the Chicago metropolitan area."
|
"The examples in the following code samples use the University of Chicago's Dominick's Finer Foods dataset to forecast orange juice sales. Dominick's was a grocery chain in the Chicago metropolitan area."
|
||||||
]
|
]
|
||||||
@@ -49,7 +56,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## Setup"
|
"## Setup<a id=\"setup\"></a>"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -58,22 +65,22 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import azureml.core\n",
|
"import json\n",
|
||||||
"import pandas as pd\n",
|
|
||||||
"import numpy as np\n",
|
|
||||||
"import logging\n",
|
"import logging\n",
|
||||||
"\n",
|
"\n",
|
||||||
"from azureml.core.workspace import Workspace\n",
|
"import azureml.core\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"from azureml.automl.core.featurization import FeaturizationConfig\n",
|
||||||
"from azureml.core.experiment import Experiment\n",
|
"from azureml.core.experiment import Experiment\n",
|
||||||
"from azureml.train.automl import AutoMLConfig\n",
|
"from azureml.core.workspace import Workspace\n",
|
||||||
"from azureml.automl.core.featurization import FeaturizationConfig"
|
"from azureml.train.automl import AutoMLConfig"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -82,7 +89,6 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
|
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -102,19 +108,20 @@
|
|||||||
"ws = Workspace.from_config()\n",
|
"ws = Workspace.from_config()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# choose a name for the run history container in the workspace\n",
|
"# choose a name for the run history container in the workspace\n",
|
||||||
"experiment_name = 'automl-ojforecasting'\n",
|
"experiment_name = \"automl-ojforecasting\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"experiment = Experiment(ws, experiment_name)\n",
|
"experiment = Experiment(ws, experiment_name)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"output = {}\n",
|
"output = {}\n",
|
||||||
"output['Subscription ID'] = ws.subscription_id\n",
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
"output['Workspace'] = ws.name\n",
|
"output[\"Workspace\"] = ws.name\n",
|
||||||
"output['SKU'] = ws.sku\n",
|
"output[\"SKU\"] = ws.sku\n",
|
||||||
"output['Resource Group'] = ws.resource_group\n",
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
"output['Location'] = ws.location\n",
|
"output[\"Location\"] = ws.location\n",
|
||||||
"output['Run History Name'] = experiment_name\n",
|
"output[\"Run History Name\"] = experiment_name\n",
|
||||||
"pd.set_option('display.max_colwidth', -1)\n",
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
"outputDf.T"
|
"outputDf.T"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -122,8 +129,11 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## Compute\n",
|
"## Compute<a id=\"compute\"></a>\n",
|
||||||
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||||
|
"\n",
|
||||||
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||||
@@ -144,10 +154,11 @@
|
|||||||
"# Verify that cluster does not exist already\n",
|
"# Verify that cluster does not exist already\n",
|
||||||
"try:\n",
|
"try:\n",
|
||||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||||
" print('Found existing cluster, use it.')\n",
|
" print(\"Found existing cluster, use it.\")\n",
|
||||||
"except ComputeTargetException:\n",
|
"except ComputeTargetException:\n",
|
||||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
|
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||||
" max_nodes=6)\n",
|
" vm_size=\"STANDARD_D12_V2\", max_nodes=6\n",
|
||||||
|
" )\n",
|
||||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"compute_target.wait_for_completion(show_output=True)"
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
@@ -157,7 +168,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## Data\n",
|
"## Data<a id=\"data\"></a>\n",
|
||||||
"You are now ready to load the historical orange juice sales data. We will load the CSV file into a plain pandas DataFrame; the time column in the CSV is called _WeekStarting_, so it will be specially parsed into the datetime type."
|
"You are now ready to load the historical orange juice sales data. We will load the CSV file into a plain pandas DataFrame; the time column in the CSV is called _WeekStarting_, so it will be specially parsed into the datetime type."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -167,11 +178,11 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"time_column_name = 'WeekStarting'\n",
|
"time_column_name = \"WeekStarting\"\n",
|
||||||
"data = pd.read_csv(\"dominicks_OJ.csv\", parse_dates=[time_column_name])\n",
|
"data = pd.read_csv(\"dominicks_OJ.csv\", parse_dates=[time_column_name])\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Drop the columns 'logQuantity' as it is a leaky feature.\n",
|
"# Drop the columns 'logQuantity' as it is a leaky feature.\n",
|
||||||
"data.drop('logQuantity', axis=1, inplace=True)\n",
|
"data.drop(\"logQuantity\", axis=1, inplace=True)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"data.head()"
|
"data.head()"
|
||||||
]
|
]
|
||||||
@@ -191,9 +202,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"time_series_id_column_names = ['Store', 'Brand']\n",
|
"time_series_id_column_names = [\"Store\", \"Brand\"]\n",
|
||||||
"nseries = data.groupby(time_series_id_column_names).ngroups\n",
|
"nseries = data.groupby(time_series_id_column_names).ngroups\n",
|
||||||
"print('Data contains {0} individual time-series.'.format(nseries))"
|
"print(\"Data contains {0} individual time-series.\".format(nseries))"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -212,7 +223,7 @@
|
|||||||
"use_stores = [2, 5, 8]\n",
|
"use_stores = [2, 5, 8]\n",
|
||||||
"data_subset = data[data.Store.isin(use_stores)]\n",
|
"data_subset = data[data.Store.isin(use_stores)]\n",
|
||||||
"nseries = data_subset.groupby(time_series_id_column_names).ngroups\n",
|
"nseries = data_subset.groupby(time_series_id_column_names).ngroups\n",
|
||||||
"print('Data subset contains {0} individual time-series.'.format(nseries))"
|
"print(\"Data subset contains {0} individual time-series.\".format(nseries))"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -231,14 +242,19 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"n_test_periods = 20\n",
|
"n_test_periods = 20\n",
|
||||||
"\n",
|
"\n",
|
||||||
|
"\n",
|
||||||
"def split_last_n_by_series_id(df, n):\n",
|
"def split_last_n_by_series_id(df, n):\n",
|
||||||
" \"\"\"Group df by series identifiers and split on last n rows for each group.\"\"\"\n",
|
" \"\"\"Group df by series identifiers and split on last n rows for each group.\"\"\"\n",
|
||||||
" df_grouped = (df.sort_values(time_column_name) # Sort by ascending time\n",
|
" df_grouped = df.sort_values(time_column_name).groupby( # Sort by ascending time\n",
|
||||||
" .groupby(time_series_id_column_names, group_keys=False))\n",
|
" time_series_id_column_names, group_keys=False\n",
|
||||||
|
" )\n",
|
||||||
" df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])\n",
|
" df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])\n",
|
||||||
|
" df_head.reset_index(inplace=True, drop=True)\n",
|
||||||
" df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])\n",
|
" df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])\n",
|
||||||
|
" df_tail.reset_index(inplace=True, drop=True)\n",
|
||||||
" return df_head, df_tail\n",
|
" return df_head, df_tail\n",
|
||||||
"\n",
|
"\n",
|
||||||
|
"\n",
|
||||||
"train, test = split_last_n_by_series_id(data_subset, n_test_periods)"
|
"train, test = split_last_n_by_series_id(data_subset, n_test_periods)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -256,18 +272,15 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"train.to_csv (r'./dominicks_OJ_train.csv', index = None, header=True)\n",
|
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||||
"test.to_csv (r'./dominicks_OJ_test.csv', index = None, header=True)"
|
"\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"datastore = ws.get_default_datastore()\n",
|
"datastore = ws.get_default_datastore()\n",
|
||||||
"datastore.upload_files(files = ['./dominicks_OJ_train.csv', './dominicks_OJ_test.csv'], target_path = 'dataset/', overwrite = True,show_progress = True)"
|
"train_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||||
|
" train, target=(datastore, \"dataset/\"), name=\"dominicks_OJ_train\"\n",
|
||||||
|
")\n",
|
||||||
|
"test_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||||
|
" test, target=(datastore, \"dataset/\"), name=\"dominicks_OJ_test\"\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -277,16 +290,6 @@
|
|||||||
"### Create dataset for training"
|
"### Create dataset for training"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from azureml.core.dataset import Dataset\n",
|
|
||||||
"train_dataset = Dataset.Tabular.from_delimited_files(path=datastore.path('dataset/dominicks_OJ_train.csv'))"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
@@ -320,7 +323,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"target_column_name = 'Quantity'"
|
"target_column_name = \"Quantity\""
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -348,13 +351,17 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"featurization_config = FeaturizationConfig()\n",
|
"featurization_config = FeaturizationConfig()\n",
|
||||||
"# Force the CPWVOL5 feature to be numeric type.\n",
|
"# Force the CPWVOL5 feature to be numeric type.\n",
|
||||||
"featurization_config.add_column_purpose('CPWVOL5', 'Numeric')\n",
|
"featurization_config.add_column_purpose(\"CPWVOL5\", \"Numeric\")\n",
|
||||||
"# Fill missing values in the target column, Quantity, with zeros.\n",
|
"# Fill missing values in the target column, Quantity, with zeros.\n",
|
||||||
"featurization_config.add_transformer_params('Imputer', ['Quantity'], {\"strategy\": \"constant\", \"fill_value\": 0})\n",
|
"featurization_config.add_transformer_params(\n",
|
||||||
|
" \"Imputer\", [\"Quantity\"], {\"strategy\": \"constant\", \"fill_value\": 0}\n",
|
||||||
|
")\n",
|
||||||
"# Fill missing values in the INCOME column with median value.\n",
|
"# Fill missing values in the INCOME column with median value.\n",
|
||||||
"featurization_config.add_transformer_params('Imputer', ['INCOME'], {\"strategy\": \"median\"})\n",
|
"featurization_config.add_transformer_params(\n",
|
||||||
|
" \"Imputer\", [\"INCOME\"], {\"strategy\": \"median\"}\n",
|
||||||
|
")\n",
|
||||||
"# Fill missing values in the Price column with forward fill (last value carried forward).\n",
|
"# Fill missing values in the Price column with forward fill (last value carried forward).\n",
|
||||||
"featurization_config.add_transformer_params('Imputer', ['Price'], {\"strategy\": \"ffill\"})"
|
"featurization_config.add_transformer_params(\"Imputer\", [\"Price\"], {\"strategy\": \"ffill\"})"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -370,14 +377,15 @@
|
|||||||
"|**time_column_name**|The name of your time column.|\n",
|
"|**time_column_name**|The name of your time column.|\n",
|
||||||
"|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
|
"|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
|
||||||
"|**time_series_id_column_names**|The column names used to uniquely identify the time series in data that has multiple rows with the same timestamp. If the time series identifiers are not defined, the data set is assumed to be one time series.|\n",
|
"|**time_series_id_column_names**|The column names used to uniquely identify the time series in data that has multiple rows with the same timestamp. If the time series identifiers are not defined, the data set is assumed to be one time series.|\n",
|
||||||
"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information."
|
"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.\n",
|
||||||
|
"|**cv_step_size**|Number of periods between two consecutive cross-validation folds. The default value is \"auto\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## Train\n",
|
"## Train<a id=\"train\"></a>\n",
|
||||||
"\n",
|
"\n",
|
||||||
"The [AutoMLConfig](https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.automlconfig.automlconfig?view=azure-ml-py) object defines the settings and data for an AutoML training job. Here, we set necessary inputs like the task type, the number of AutoML iterations to try, the training data, and cross-validation parameters.\n",
|
"The [AutoMLConfig](https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.automlconfig.automlconfig?view=azure-ml-py) object defines the settings and data for an AutoML training job. Here, we set necessary inputs like the task type, the number of AutoML iterations to try, the training data, and cross-validation parameters.\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -392,7 +400,7 @@
|
|||||||
"In the first case, AutoML loops over all time-series in your dataset and trains one model (e.g. AutoArima or Prophet, as the case may be) for each series. This can result in long runtimes to train these models if there are a lot of series in the data. One way to mitigate this problem is to fit models for different series in parallel if you have multiple compute cores available. To enable this behavior, set the `max_cores_per_iteration` parameter in your AutoMLConfig as shown in the example in the next cell. \n",
|
"In the first case, AutoML loops over all time-series in your dataset and trains one model (e.g. AutoArima or Prophet, as the case may be) for each series. This can result in long runtimes to train these models if there are a lot of series in the data. One way to mitigate this problem is to fit models for different series in parallel if you have multiple compute cores available. To enable this behavior, set the `max_cores_per_iteration` parameter in your AutoMLConfig as shown in the example in the next cell. \n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Finally, a note about the cross-validation (CV) procedure for time-series data. AutoML uses out-of-sample error estimates to select a best pipeline/model, so it is important that the CV fold splitting is done correctly. Time-series can violate the basic statistical assumptions of the canonical K-Fold CV strategy, so AutoML implements a [rolling origin validation](https://robjhyndman.com/hyndsight/tscv/) procedure to create CV folds for time-series data. To use this procedure, you just need to specify the desired number of CV folds in the AutoMLConfig object. It is also possible to bypass CV and use your own validation set by setting the *validation_data* parameter of AutoMLConfig.\n",
|
"Finally, a note about the cross-validation (CV) procedure for time-series data. AutoML uses out-of-sample error estimates to select a best pipeline/model, so it is important that the CV fold splitting is done correctly. Time-series can violate the basic statistical assumptions of the canonical K-Fold CV strategy, so AutoML implements a [rolling origin validation](https://robjhyndman.com/hyndsight/tscv/) procedure to create CV folds for time-series data. To use this procedure, you could specify the desired number of CV folds and the number of periods between two consecutive folds in the AutoMLConfig object, or AutoMl could set them automatically if you don't specify them. It is also possible to bypass CV and use your own validation set by setting the *validation_data* parameter of AutoMLConfig.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Here is a summary of AutoMLConfig parameters used for training the OJ model:\n",
|
"Here is a summary of AutoMLConfig parameters used for training the OJ model:\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -405,7 +413,7 @@
|
|||||||
"|**training_data**|Input dataset, containing both features and label column.|\n",
|
"|**training_data**|Input dataset, containing both features and label column.|\n",
|
||||||
"|**label_column_name**|The name of the label column.|\n",
|
"|**label_column_name**|The name of the label column.|\n",
|
||||||
"|**compute_target**|The remote compute for training.|\n",
|
"|**compute_target**|The remote compute for training.|\n",
|
||||||
"|**n_cross_validations**|Number of cross-validation folds to use for model/pipeline selection|\n",
|
"|**n_cross_validations**|Number of cross-validation folds to use for model/pipeline selection. The default value is \"auto\", in which case AutoML determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value.\n",
|
||||||
"|**enable_voting_ensemble**|Allow AutoML to create a Voting ensemble of the best performing models|\n",
|
"|**enable_voting_ensemble**|Allow AutoML to create a Voting ensemble of the best performing models|\n",
|
||||||
"|**enable_stack_ensemble**|Allow AutoML to create a Stack ensemble of the best performing models|\n",
|
"|**enable_stack_ensemble**|Allow AutoML to create a Stack ensemble of the best performing models|\n",
|
||||||
"|**debug_log**|Log file path for writing debugging information|\n",
|
"|**debug_log**|Log file path for writing debugging information|\n",
|
||||||
@@ -420,25 +428,30 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||||
|
"\n",
|
||||||
"forecasting_parameters = ForecastingParameters(\n",
|
"forecasting_parameters = ForecastingParameters(\n",
|
||||||
" time_column_name=time_column_name,\n",
|
" time_column_name=time_column_name,\n",
|
||||||
" forecast_horizon=n_test_periods,\n",
|
" forecast_horizon=n_test_periods,\n",
|
||||||
" time_series_id_column_names=time_series_id_column_names\n",
|
" time_series_id_column_names=time_series_id_column_names,\n",
|
||||||
|
" freq=\"W-THU\", # Set the forecast frequency to be weekly (start on each Thursday)\n",
|
||||||
|
" cv_step_size=\"auto\",\n",
|
||||||
")\n",
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"automl_config = AutoMLConfig(task='forecasting',\n",
|
"automl_config = AutoMLConfig(\n",
|
||||||
" debug_log='automl_oj_sales_errors.log',\n",
|
" task=\"forecasting\",\n",
|
||||||
" primary_metric='normalized_mean_absolute_error',\n",
|
" debug_log=\"automl_oj_sales_errors.log\",\n",
|
||||||
" experiment_timeout_hours=0.25,\n",
|
" primary_metric=\"normalized_mean_absolute_error\",\n",
|
||||||
" training_data=train_dataset,\n",
|
" experiment_timeout_hours=0.25,\n",
|
||||||
" label_column_name=target_column_name,\n",
|
" training_data=train_dataset,\n",
|
||||||
" compute_target=compute_target,\n",
|
" label_column_name=target_column_name,\n",
|
||||||
" enable_early_stopping=True,\n",
|
" compute_target=compute_target,\n",
|
||||||
" featurization=featurization_config,\n",
|
" enable_early_stopping=True,\n",
|
||||||
" n_cross_validations=3,\n",
|
" featurization=featurization_config,\n",
|
||||||
" verbosity=logging.INFO,\n",
|
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||||
" max_cores_per_iteration=-1,\n",
|
" verbosity=logging.INFO,\n",
|
||||||
" forecasting_parameters=forecasting_parameters)"
|
" max_cores_per_iteration=-1,\n",
|
||||||
|
" forecasting_parameters=forecasting_parameters,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -455,8 +468,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"remote_run = experiment.submit(automl_config, show_output=False)\n",
|
"remote_run = experiment.submit(automl_config, show_output=False)"
|
||||||
"remote_run"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -472,8 +484,8 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"### Retrieve the Best Model\n",
|
"### Retrieve the Best Run details\n",
|
||||||
"Each run within an Experiment stores serialized (i.e. pickled) pipelines from the AutoML iterations. We can now retrieve the pipeline with the best performance on the validation dataset:"
|
"Below we retrieve the best Run object from among all the runs in the experiment."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -482,9 +494,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"best_run, fitted_model = remote_run.get_output()\n",
|
"best_run = remote_run.get_best_child()\n",
|
||||||
"print(fitted_model.steps)\n",
|
"model_name = best_run.properties[\"model_name\"]\n",
|
||||||
"model_name = best_run.properties['model_name']"
|
"best_run"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -502,25 +514,37 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"custom_featurizer = fitted_model.named_steps['timeseriestransformer']"
|
"# Download the featurization summary JSON file locally\n",
|
||||||
]
|
"best_run.download_file(\n",
|
||||||
},
|
" \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
|
||||||
{
|
")\n",
|
||||||
"cell_type": "code",
|
"\n",
|
||||||
"execution_count": null,
|
"# Render the JSON as a pandas DataFrame\n",
|
||||||
"metadata": {},
|
"with open(\"featurization_summary.json\", \"r\") as f:\n",
|
||||||
"outputs": [],
|
" records = json.load(f)\n",
|
||||||
"source": [
|
"fs = pd.DataFrame.from_records(records)\n",
|
||||||
"custom_featurizer.get_featurization_summary()"
|
"\n",
|
||||||
|
"# View a summary of the featurization\n",
|
||||||
|
"fs[\n",
|
||||||
|
" [\n",
|
||||||
|
" \"RawFeatureName\",\n",
|
||||||
|
" \"TypeDetected\",\n",
|
||||||
|
" \"Dropped\",\n",
|
||||||
|
" \"EngineeredFeatureCount\",\n",
|
||||||
|
" \"Transformations\",\n",
|
||||||
|
" ]\n",
|
||||||
|
"]"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"# Forecasting\n",
|
"# Forecast<a id=\"forecast\"></a>\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Now that we have retrieved the best pipeline/model, it can be used to make predictions on test data. First, we remove the target values from the test set:"
|
"Now that we have retrieved the best pipeline/model, it can be used to make predictions on test data. We will do batch scoring on the test dataset which should have the same schema as training dataset.\n",
|
||||||
|
"\n",
|
||||||
|
"The inference will run on a remote compute. In this example, it will re-use the training compute."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -529,17 +553,15 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"X_test = test\n",
|
"test_experiment = Experiment(ws, experiment_name + \"_inference\")"
|
||||||
"y_test = X_test.pop(target_column_name).values"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "markdown",
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
"source": [
|
||||||
"X_test.head()"
|
"### Retrieving forecasts from the model\n",
|
||||||
|
"We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -555,18 +577,19 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# forecast returns the predictions and the featurized data, aligned to X_test.\n",
|
"from run_forecast import run_remote_inference\n",
|
||||||
"# This contains the assumptions that were made in the forecast\n",
|
|
||||||
"y_predictions, X_trans = fitted_model.forecast(X_test)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"If you are used to scikit pipelines, perhaps you expected `predict(X_test)`. However, forecasting requires a more general interface that also supplies the past target `y` values. Please use `forecast(X,y)` as `predict(X)` is reserved for internal purposes on forecasting models.\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"The [forecast function notebook](../forecasting-forecast-function/auto-ml-forecasting-function.ipynb)."
|
"remote_run_infer = run_remote_inference(\n",
|
||||||
|
" test_experiment=test_experiment,\n",
|
||||||
|
" compute_target=compute_target,\n",
|
||||||
|
" train_run=best_run,\n",
|
||||||
|
" test_dataset=test_dataset,\n",
|
||||||
|
" target_column_name=target_column_name,\n",
|
||||||
|
")\n",
|
||||||
|
"remote_run_infer.wait_for_completion(show_output=False)\n",
|
||||||
|
"\n",
|
||||||
|
"# download the forecast file to the local machine\n",
|
||||||
|
"remote_run_infer.download_file(\"outputs/predictions.csv\", \"predictions.csv\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -586,8 +609,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"assign_dict = {'predicted': y_predictions, target_column_name: y_test}\n",
|
"# load forecast data frame\n",
|
||||||
"df_all = X_test.assign(**assign_dict)"
|
"fcst_df = pd.read_csv(\"predictions.csv\", parse_dates=[time_column_name])\n",
|
||||||
|
"fcst_df.head()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -602,19 +626,24 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"# use automl scoring module\n",
|
"# use automl scoring module\n",
|
||||||
"scores = scoring.score_regression(\n",
|
"scores = scoring.score_regression(\n",
|
||||||
" y_test=df_all[target_column_name],\n",
|
" y_test=fcst_df[target_column_name],\n",
|
||||||
" y_pred=df_all['predicted'],\n",
|
" y_pred=fcst_df[\"predicted\"],\n",
|
||||||
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
|
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print(\"[Test data scores]\\n\")\n",
|
"print(\"[Test data scores]\\n\")\n",
|
||||||
"for key, value in scores.items(): \n",
|
"for key, value in scores.items():\n",
|
||||||
" print('{}: {:.3f}'.format(key, value))\n",
|
" print(\"{}: {:.3f}\".format(key, value))\n",
|
||||||
" \n",
|
"\n",
|
||||||
"# Plot outputs\n",
|
"# Plot outputs\n",
|
||||||
"%matplotlib inline\n",
|
"%matplotlib inline\n",
|
||||||
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
|
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df[\"predicted\"], color=\"b\")\n",
|
||||||
"test_test = plt.scatter(df_all[target_column_name], df_all[target_column_name], color='g')\n",
|
"test_test = plt.scatter(\n",
|
||||||
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
|
" fcst_df[target_column_name], fcst_df[target_column_name], color=\"g\"\n",
|
||||||
|
")\n",
|
||||||
|
"plt.legend(\n",
|
||||||
|
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
|
||||||
|
")\n",
|
||||||
"plt.show()"
|
"plt.show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -622,7 +651,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"# Operationalize"
|
"# Operationalize<a id=\"operationalize\"></a>"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -638,9 +667,11 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"description = 'AutoML OJ forecaster'\n",
|
"description = \"AutoML OJ forecaster\"\n",
|
||||||
"tags = None\n",
|
"tags = None\n",
|
||||||
"model = remote_run.register_model(model_name = model_name, description = description, tags = tags)\n",
|
"model = remote_run.register_model(\n",
|
||||||
|
" model_name=model_name, description=description, tags=tags\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print(remote_run.model_id)"
|
"print(remote_run.model_id)"
|
||||||
]
|
]
|
||||||
@@ -660,8 +691,8 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"script_file_name = 'score_fcast.py'\n",
|
"script_file_name = \"score_fcast.py\"\n",
|
||||||
"best_run.download_file('outputs/scoring_file_v_1_0_0.py', script_file_name)"
|
"best_run.download_file(\"outputs/scoring_file_v_1_0_0.py\", script_file_name)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -682,15 +713,18 @@
|
|||||||
"from azureml.core.webservice import Webservice\n",
|
"from azureml.core.webservice import Webservice\n",
|
||||||
"from azureml.core.model import Model\n",
|
"from azureml.core.model import Model\n",
|
||||||
"\n",
|
"\n",
|
||||||
"inference_config = InferenceConfig(environment = best_run.get_environment(), \n",
|
"inference_config = InferenceConfig(\n",
|
||||||
" entry_script = script_file_name)\n",
|
" environment=best_run.get_environment(), entry_script=script_file_name\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1, \n",
|
"aciconfig = AciWebservice.deploy_configuration(\n",
|
||||||
" memory_gb = 2, \n",
|
" cpu_cores=2,\n",
|
||||||
" tags = {'type': \"automl-forecasting\"},\n",
|
" memory_gb=4,\n",
|
||||||
" description = \"Automl forecasting sample service\")\n",
|
" tags={\"type\": \"automl-forecasting\"},\n",
|
||||||
|
" description=\"Automl forecasting sample service\",\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"aci_service_name = 'automl-oj-forecast-01'\n",
|
"aci_service_name = \"automl-oj-forecast-03\"\n",
|
||||||
"print(aci_service_name)\n",
|
"print(aci_service_name)\n",
|
||||||
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
|
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
|
||||||
"aci_service.wait_for_deployment(True)\n",
|
"aci_service.wait_for_deployment(True)\n",
|
||||||
@@ -720,19 +754,27 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import json\n",
|
"import json\n",
|
||||||
"X_query = X_test.copy()\n",
|
"\n",
|
||||||
|
"X_query = test.copy()\n",
|
||||||
|
"X_query.pop(target_column_name)\n",
|
||||||
"# We have to convert datetime to string, because Timestamps cannot be serialized to JSON.\n",
|
"# We have to convert datetime to string, because Timestamps cannot be serialized to JSON.\n",
|
||||||
"X_query[time_column_name] = X_query[time_column_name].astype(str)\n",
|
"X_query[time_column_name] = X_query[time_column_name].astype(str)\n",
|
||||||
"# The Service object accept the complex dictionary, which is internally converted to JSON string.\n",
|
"# The Service object accept the complex dictionary, which is internally converted to JSON string.\n",
|
||||||
"# The section 'data' contains the data frame in the form of dictionary.\n",
|
"# The section 'data' contains the data frame in the form of dictionary.\n",
|
||||||
"test_sample = json.dumps({'data': X_query.to_dict(orient='records')})\n",
|
"sample_quantiles = [0.025, 0.975]\n",
|
||||||
"response = aci_service.run(input_data = test_sample)\n",
|
"test_sample = json.dumps(\n",
|
||||||
|
" {\"data\": X_query.to_dict(orient=\"records\"), \"quantiles\": sample_quantiles}\n",
|
||||||
|
")\n",
|
||||||
|
"response = aci_service.run(input_data=test_sample)\n",
|
||||||
"# translate from networkese to datascientese\n",
|
"# translate from networkese to datascientese\n",
|
||||||
"try: \n",
|
"try:\n",
|
||||||
" res_dict = json.loads(response)\n",
|
" res_dict = json.loads(response)\n",
|
||||||
" y_fcst_all = pd.DataFrame(res_dict['index'])\n",
|
" y_fcst_all = pd.DataFrame(res_dict[\"index\"])\n",
|
||||||
" y_fcst_all[time_column_name] = pd.to_datetime(y_fcst_all[time_column_name], unit = 'ms')\n",
|
" y_fcst_all[time_column_name] = pd.to_datetime(\n",
|
||||||
" y_fcst_all['forecast'] = res_dict['forecast'] \n",
|
" y_fcst_all[time_column_name], unit=\"ms\"\n",
|
||||||
|
" )\n",
|
||||||
|
" y_fcst_all[\"forecast\"] = res_dict[\"forecast\"]\n",
|
||||||
|
" y_fcst_all[\"prediction_interval\"] = res_dict[\"prediction_interval\"]\n",
|
||||||
"except:\n",
|
"except:\n",
|
||||||
" print(res_dict)"
|
" print(res_dict)"
|
||||||
]
|
]
|
||||||
@@ -759,8 +801,8 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"serv = Webservice(ws, 'automl-oj-forecast-01')\n",
|
"serv = Webservice(ws, \"automl-oj-forecast-03\")\n",
|
||||||
"serv.delete() # don't do it accidentally"
|
"serv.delete() # don't do it accidentally"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
@@ -788,9 +830,9 @@
|
|||||||
"friendly_name": "Forecasting orange juice sales with deployment",
|
"friendly_name": "Forecasting orange juice sales with deployment",
|
||||||
"index_order": 1,
|
"index_order": 1,
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3.6",
|
"display_name": "Python 3.8 - AzureML",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python36"
|
"name": "python38-azureml"
|
||||||
},
|
},
|
||||||
"language_info": {
|
"language_info": {
|
||||||
"codemirror_mode": {
|
"codemirror_mode": {
|
||||||
@@ -802,12 +844,17 @@
|
|||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.6.8"
|
"version": "3.8.10"
|
||||||
},
|
},
|
||||||
"tags": [
|
"tags": [
|
||||||
"None"
|
"None"
|
||||||
],
|
],
|
||||||
"task": "Forecasting"
|
"task": "Forecasting",
|
||||||
|
"vscode": {
|
||||||
|
"interpreter": {
|
||||||
|
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
|
||||||
|
}
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
"nbformat_minor": 4
|
"nbformat_minor": 4
|
||||||
|
|||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: auto-ml-forecasting-orange-juice-sales
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -0,0 +1,61 @@
|
|||||||
|
"""
|
||||||
|
This is the script that is executed on the compute instance. It relies
|
||||||
|
on the model.pkl file which is uploaded along with this script to the
|
||||||
|
compute instance.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
from azureml.core import Dataset, Run
|
||||||
|
import joblib
|
||||||
|
from pandas.tseries.frequencies import to_offset
|
||||||
|
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
parser.add_argument(
|
||||||
|
"--target_column_name",
|
||||||
|
type=str,
|
||||||
|
dest="target_column_name",
|
||||||
|
help="Target Column Name",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
|
||||||
|
)
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
target_column_name = args.target_column_name
|
||||||
|
test_dataset_id = args.test_dataset
|
||||||
|
|
||||||
|
run = Run.get_context()
|
||||||
|
ws = run.experiment.workspace
|
||||||
|
|
||||||
|
# get the input dataset by id
|
||||||
|
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
|
||||||
|
|
||||||
|
X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
|
||||||
|
y_test = X_test.pop(target_column_name).values
|
||||||
|
|
||||||
|
# generate forecast
|
||||||
|
fitted_model = joblib.load("model.pkl")
|
||||||
|
# We have default quantiles values set as below(95th percentile)
|
||||||
|
quantiles = [0.025, 0.5, 0.975]
|
||||||
|
predicted_column_name = "predicted"
|
||||||
|
PI = "prediction_interval"
|
||||||
|
fitted_model.quantiles = quantiles
|
||||||
|
pred_quantiles = fitted_model.forecast_quantiles(X_test)
|
||||||
|
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
|
||||||
|
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
|
||||||
|
)
|
||||||
|
X_test[target_column_name] = y_test
|
||||||
|
X_test[PI] = pred_quantiles[PI]
|
||||||
|
X_test[predicted_column_name] = pred_quantiles[0.5]
|
||||||
|
# drop rows where prediction or actuals are nan
|
||||||
|
# happens because of missing actuals
|
||||||
|
# or at edges of time due to lags/rolling windows
|
||||||
|
clean = X_test[
|
||||||
|
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
|
||||||
|
]
|
||||||
|
|
||||||
|
file_name = "outputs/predictions.csv"
|
||||||
|
export_csv = clean.to_csv(file_name, header=True, index=False) # added Index
|
||||||
|
|
||||||
|
# Upload the predictions into artifacts
|
||||||
|
run.upload_file(name=file_name, path_or_stream=file_name)
|
||||||
@@ -0,0 +1,49 @@
|
|||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
from azureml.core import ScriptRunConfig
|
||||||
|
|
||||||
|
|
||||||
|
def run_remote_inference(
|
||||||
|
test_experiment,
|
||||||
|
compute_target,
|
||||||
|
train_run,
|
||||||
|
test_dataset,
|
||||||
|
target_column_name,
|
||||||
|
inference_folder="./forecast",
|
||||||
|
):
|
||||||
|
# Create local directory to copy the model.pkl and forecsting_script.py files into.
|
||||||
|
# These files will be uploaded to and executed on the compute instance.
|
||||||
|
os.makedirs(inference_folder, exist_ok=True)
|
||||||
|
shutil.copy("forecasting_script.py", inference_folder)
|
||||||
|
|
||||||
|
train_run.download_file(
|
||||||
|
"outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
|
||||||
|
)
|
||||||
|
|
||||||
|
inference_env = train_run.get_environment()
|
||||||
|
|
||||||
|
config = ScriptRunConfig(
|
||||||
|
source_directory=inference_folder,
|
||||||
|
script="forecasting_script.py",
|
||||||
|
arguments=[
|
||||||
|
"--target_column_name",
|
||||||
|
target_column_name,
|
||||||
|
"--test_dataset",
|
||||||
|
test_dataset.as_named_input(test_dataset.name),
|
||||||
|
],
|
||||||
|
compute_target=compute_target,
|
||||||
|
environment=inference_env,
|
||||||
|
)
|
||||||
|
|
||||||
|
run = test_experiment.submit(
|
||||||
|
config,
|
||||||
|
tags={
|
||||||
|
"training_run_id": train_run.id,
|
||||||
|
"run_algorithm": train_run.properties["run_algorithm"],
|
||||||
|
"valid_score": train_run.properties["score"],
|
||||||
|
"primary_metric": train_run.properties["primary_metric"],
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
run.log("run_algorithm", run.tags["run_algorithm"])
|
||||||
|
return run
|
||||||