Compare commits: azureml-sd...master (348 commits)
CODE_OF_CONDUCT.md (new file, +9)
@@ -0,0 +1,9 @@
+# Microsoft Open Source Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+
+Resources:
+
+- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
+- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
+- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
NBSETUP.md
@@ -28,7 +28,7 @@ git clone https://github.com/Azure/MachineLearningNotebooks.git
 pip install azureml-sdk[notebooks,tensorboard]
 
 # install model explainability component
-pip install azureml-sdk[explain]
+pip install azureml-sdk[interpret]
 
 # install automated ml components
 pip install azureml-sdk[automl]
@@ -86,7 +86,7 @@ If you need additional Azure ML SDK components, you can either modify the Docker
 pip install azureml-sdk[automl]
 
 # install the core SDK and model explainability component
-pip install azureml-sdk[explain]
+pip install azureml-sdk[interpret]
 
 # install the core SDK and experimental components
 pip install azureml-sdk[contrib]
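The change in both hunks is the rename of the SDK's model-explainability extra from `explain` to `interpret`. As a minimal sketch of what the renamed extra provides (an illustration, not taken from this repo's docs; it assumes `pip install azureml-sdk[interpret]`, which pulls in the `interpret-community` package, plus scikit-learn for a toy model):

```python
# Hypothetical example: explain a small scikit-learn model with the
# TabularExplainer shipped by interpret-community.
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from interpret.ext.blackbox import TabularExplainer  # installed via the extra

data = load_iris()
model = RandomForestClassifier(n_estimators=50).fit(data.data, data.target)

# Build a global explanation over the training data
explainer = TabularExplainer(model, data.data, features=data.feature_names)
global_explanation = explainer.explain_global(data.data)

# Feature names ranked by global importance, with their scores
print(global_explanation.get_ranked_global_names())
print(global_explanation.get_ranked_global_values())
```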
README.md (99 changed lines)
@@ -1,76 +1,43 @@
-# Azure Machine Learning service example notebooks
+# Azure Machine Learning Python SDK notebooks
 
-This repository contains example notebooks demonstrating the [Azure Machine Learning](https://azure.microsoft.com/en-us/services/machine-learning-service/) Python SDK which allows you to build, train, deploy and manage machine learning solutions using Azure. The AML SDK allows you the choice of using local or cloud compute resources, while managing and maintaining the complete data science workflow from the cloud.
+### **With the introduction of AzureML SDK v2, this samples repository for the v1 SDK is now deprecated and will not be monitored or updated. Users are encouraged to visit the [v2 SDK samples repository](https://github.com/Azure/azureml-examples) instead for up-to-date and enhanced examples of how to build, train, and deploy machine learning models with AzureML's newest features.**
 
-
+Welcome to the Azure Machine Learning Python SDK notebooks repository!
 
+## Getting started
 
-## Quick installation
-```sh
-pip install azureml-sdk
-```
-Read more detailed instructions on [how to set up your environment](./NBSETUP.md) using Azure Notebook service, your own Jupyter notebook server, or Docker.
+These notebooks are recommended for use in an Azure Machine Learning [Compute Instance](https://docs.microsoft.com/azure/machine-learning/concept-compute-instance), where you can run them without any additional set up.
 
-## How to navigate and use the example notebooks?
-If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, you should always run the [Configuration](./configuration.ipynb) notebook first when setting up a notebook library on a new machine or in a new environment. It configures your notebook library to connect to an Azure Machine Learning workspace, and sets up your workspace and compute to be used by many of the other examples.
-This [index](./index.md) should assist in navigating the Azure Machine Learning notebook samples and encourage efficient retrieval of topics and content.
+However, the notebooks can be run in any development environment with the correct `azureml` packages installed.
 
-If you want to...
-
-* ...try out and explore Azure ML, start with image classification tutorials: [Part 1 (Training)](./tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb) and [Part 2 (Deployment)](./tutorials/image-classification-mnist-data/img-classification-part2-deploy.ipynb).
-* ...learn about experimentation and tracking run history, first [train within Notebook](./how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb), then try [training on remote VM](./how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb) and [using logging APIs](./how-to-use-azureml/training/logging-api/logging-api.ipynb).
-* ...train deep learning models at scale, first learn about [Machine Learning Compute](./how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb), and then try [distributed hyperparameter tuning](./how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) and [distributed training](./how-to-use-azureml/training-with-deep-learning/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb).
-* ...deploy models as a realtime scoring service, first learn the basics by [training within Notebook and deploying to Azure Container Instance](./how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb), then learn how to [production deploy models on Azure Kubernetes Cluster](./how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb).
-* ...deploy models as a batch scoring service, first [train a model within Notebook](./how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb), then [create Machine Learning Compute for scoring compute](./how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb), and [use Machine Learning Pipelines to deploy your model](https://aka.ms/pl-batch-scoring).
-* ...monitor your deployed models, learn about using [App Insights](./how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb).
-
-## Tutorials
-
-The [Tutorials](./tutorials) folder contains notebooks for the tutorials described in the [Azure Machine Learning documentation](https://aka.ms/aml-docs).
-
-## How to use Azure ML
-
-The [How to use Azure ML](./how-to-use-azureml) folder contains specific examples demonstrating the features of the Azure Machine Learning SDK.
-
-- [Training](./how-to-use-azureml/training) - Examples of how to build models using Azure ML's logging and execution capabilities on local and remote compute targets
-- [Training with Deep Learning](./how-to-use-azureml/training-with-deep-learning) - Examples demonstrating how to build deep learning models using estimators and parameter sweeps
-- [Manage Azure ML Service](./how-to-use-azureml/manage-azureml-service) - Examples of how to perform tasks, such as authenticating against the Azure ML service in different ways
-- [Automated Machine Learning](./how-to-use-azureml/automated-machine-learning) - Examples using Automated Machine Learning to automatically generate optimal machine learning pipelines and models
-- [Machine Learning Pipelines](./how-to-use-azureml/machine-learning-pipelines) - Examples showing how to create and use reusable pipelines for training and batch scoring
-- [Deployment](./how-to-use-azureml/deployment) - Examples showing how to deploy and manage machine learning models and solutions
-- [Azure Databricks](./how-to-use-azureml/azure-databricks) - Examples showing how to use Azure ML with Azure Databricks
-- [Monitor Models](./how-to-use-azureml/monitor-models) - Examples showing how to enable model monitoring services such as DataDrift
-
----
-## Documentation
-
-* Quickstarts, end-to-end tutorials, and how-tos on the [official documentation site for Azure Machine Learning service](https://docs.microsoft.com/en-us/azure/machine-learning/service/).
-* [Python SDK reference](https://docs.microsoft.com/en-us/python/api/overview/azure/ml/intro?view=azure-ml-py)
-* Azure ML Data Prep SDK [overview](https://aka.ms/data-prep-sdk), [Python SDK reference](https://aka.ms/aml-data-prep-apiref), and [tutorials and how-tos](https://aka.ms/aml-data-prep-notebooks).
-
----
-
-## Community Repository
-Visit this [community repository](https://github.com/microsoft/MLOps/tree/master/examples) to find useful end-to-end sample notebooks. Also, please follow these [contribution guidelines](https://github.com/microsoft/MLOps/blob/master/contributing.md) when contributing to this repository.
-
-## Projects using Azure Machine Learning
-
-Visit the following repos to see projects contributed by Azure ML users:
-- [AMLSamples](https://github.com/Azure/AMLSamples) A number of end-to-end examples, including face recognition, predictive maintenance, customer churn and sentiment analysis.
-- [Learn about Natural Language Processing best practices using Azure Machine Learning service](https://github.com/microsoft/nlp)
-- [Pre-Train BERT models using Azure Machine Learning service](https://github.com/Microsoft/AzureML-BERT)
-- [Fashion MNIST with Azure ML SDK](https://github.com/amynic/azureml-sdk-fashion)
-- [UMass Amherst Student Samples](https://github.com/katiehouse3/microsoft-azure-ml-notebooks) - A number of end-to-end machine learning notebooks, including machine translation, image classification, and customer churn, created by students in the 696DS course at UMass Amherst.
-
-## Data/Telemetry
-This repository collects usage data and sends it to Microsoft to help improve our products and services. Read Microsoft's [privacy statement to learn more](https://privacy.microsoft.com/en-US/privacystatement).
-
-To opt out of tracking, please go to the raw markdown or .ipynb files and remove the following line of code:
+Install the `azureml.core` Python package:
+
 ```sh
-""
+pip install azureml-core
 ```
-This URL will be slightly different depending on the file.
-
-
+
+Install additional packages as needed:
+
+```sh
+pip install azureml-mlflow
+pip install azureml-dataset-runtime
+pip install azureml-automl-runtime
+pip install azureml-pipeline
+pip install azureml-pipeline-steps
+...
+```
+
+We recommend starting with one of the [quickstarts](tutorials/compute-instance-quickstarts).
+
+## Contributing
+
+This repository is a push-only mirror. Pull requests are ignored.
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). Please see the [code of conduct](CODE_OF_CONDUCT.md) for details.
+
+## Reference
+
+- [Documentation](https://docs.microsoft.com/azure/machine-learning)
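After the installs the new README describes, a quick sanity check (a minimal sketch, not part of the README itself) is to print the installed v1 SDK version, which is also what the configuration notebook further below does:

```python
# Verify that `pip install azureml-core` succeeded and report the version.
import azureml.core

print("Azure ML SDK version:", azureml.core.VERSION)
```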
SECURITY.md (new file, +41)
@@ -0,0 +1,41 @@
+<!-- BEGIN MICROSOFT SECURITY.MD V0.0.7 BLOCK -->
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+* Full paths of source file(s) related to the manifestation of the issue
+* The location of the affected source code (tag/branch/commit or direct URL)
+* Any special configuration required to reproduce the issue
+* Step-by-step instructions to reproduce the issue
+* Proof-of-concept or exploit code (if possible)
+* Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+<!-- END MICROSOFT SECURITY.MD BLOCK -->
configuration.ipynb
@@ -103,7 +103,7 @@
    "source": [
     "import azureml.core\n",
     "\n",
-    "print(\"This notebook was created using version 1.4.0 of the Azure ML SDK\")\n",
+    "print(\"This notebook was created using version 1.59.0 of the Azure ML SDK\")\n",
     "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
    ]
   },
@@ -254,6 +254,8 @@
     "\n",
     "Many of the sample notebooks use Azure ML managed compute (AmlCompute) to train models using a dynamically scalable pool of compute. In this section you will create default compute clusters for use by the other notebooks and any other operations you choose.\n",
     "\n",
+    "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
+    "\n",
     "To create a cluster, you need to specify a compute configuration that specifies the type of machine to be used and the scalability behaviors. Then you choose a name for the cluster that is unique within the workspace that can be used to address the cluster later.\n",
     "\n",
     "The cluster parameters are:\n",
@@ -327,7 +329,7 @@
     "    print(\"Creating new gpu-cluster\")\n",
     "    \n",
     "    # Specify the configuration for the new cluster\n",
-    "    compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_NC6\",\n",
+    "    compute_config = AmlCompute.provisioning_configuration(vm_size=\"Standard_NC6s_v3\",\n",
     "                                                           min_nodes=0,\n",
     "                                                           max_nodes=4)\n",
     "    # Create the cluster with the specified name and configuration\n",
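For context, this cell sits inside the notebook's create-or-reuse pattern for AmlCompute. Roughly, as a self-contained sketch (assuming a workspace config file is present and `gpu-cluster` is the desired cluster name), the pattern looks like:

```python
# Sketch of the surrounding create-or-reuse pattern; only the vm_size value
# changes in the hunk above.
from azureml.core import Workspace
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.core.compute_target import ComputeTargetException

ws = Workspace.from_config()
cluster_name = "gpu-cluster"

try:
    # Reuse the cluster if it already exists in the workspace
    gpu_cluster = ComputeTarget(workspace=ws, name=cluster_name)
    print("Found existing gpu-cluster")
except ComputeTargetException:
    print("Creating new gpu-cluster")
    # Specify the configuration for the new cluster (updated VM size)
    compute_config = AmlCompute.provisioning_configuration(vm_size="Standard_NC6s_v3",
                                                           min_nodes=0,
                                                           max_nodes=4)
    # Create the cluster with the specified name and configuration
    gpu_cluster = ComputeTarget.create(ws, cluster_name, compute_config)
    gpu_cluster.wait_for_completion(show_output=True)
```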
@@ -365,9 +367,9 @@
   }
  ],
  "kernelspec": {
-   "display_name": "Python 3.6",
+   "display_name": "Python 3.8 - AzureML",
    "language": "python",
-   "name": "python36"
+   "name": "python38-azureml"
  },
  "language_info": {
   "codemirror_mode": {
RAPIDS notebook
@@ -174,7 +174,7 @@
     "else:\n",
     "    print(\"creating new cluster\")\n",
     "    # vm_size parameter below could be modified to one of the RAPIDS-supported VM types\n",
-    "    provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"Standard_NC6s_v2\", min_nodes=1, max_nodes = 1)\n",
+    "    provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"Standard_NC6s_v3\", min_nodes=1, max_nodes = 1)\n",
     "\n",
     "    # create the cluster\n",
     "    gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, provisioning_config)\n",
@@ -188,13 +188,6 @@
     "### Script to process data and train model"
    ]
   },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The _process_data.py_ script used in the step below is a slightly modified implementation of [RAPIDS Mortgage E2E example](https://github.com/rapidsai/notebooks-contrib/blob/master/intermediate_notebooks/E2E/mortgage/mortgage_e2e.ipynb)."
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -373,7 +366,7 @@
     "run_config.target = gpu_cluster_name\n",
     "run_config.environment.docker.enabled = True\n",
     "run_config.environment.docker.gpu_support = True\n",
-    "run_config.environment.docker.base_image = \"mcr.microsoft.com/azureml/base-gpu:intelmpi2018.3-cuda10.0-cudnn7-ubuntu16.04\"\n",
+    "run_config.environment.docker.base_image = \"mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu20.04\"\n",
     "run_config.environment.spark.precache_packages = False\n",
     "run_config.data_references={'data':data_ref.to_config()}"
    ]
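Assembled into a runnable whole, the updated configuration corresponds roughly to this sketch (assumed names: a workspace loaded from config, a cluster called `gpu-cluster`, and a `train.py` entry script; the data-reference wiring shown in the hunk is omitted here):

```python
from azureml.core import Experiment, ScriptRunConfig, Workspace
from azureml.core.runconfig import RunConfiguration

ws = Workspace.from_config()

run_config = RunConfiguration()
run_config.target = "gpu-cluster"               # assumed compute target name
run_config.environment.docker.enabled = True
run_config.environment.docker.gpu_support = True
# Updated CUDA 11 base image from the hunk above
run_config.environment.docker.base_image = (
    "mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu20.04"
)
run_config.environment.spark.precache_packages = False

src = ScriptRunConfig(source_directory=".", script="train.py",
                      run_config=run_config)
run = Experiment(ws, "rapids-demo").submit(src)
run.wait_for_completion(show_output=True)
```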
@@ -405,7 +398,7 @@
     "# run_config.target = gpu_cluster_name\n",
     "# run_config.environment.docker.enabled = True\n",
     "# run_config.environment.docker.gpu_support = True\n",
-    "# run_config.environment.docker.base_image = \"rapidsai/rapidsai:cuda9.2-runtime-ubuntu18.04\"\n",
+    "# run_config.environment.docker.base_image = \"rapidsai/rapidsai:cuda9.2-runtime-ubuntu20.04\"\n",
     "# # run_config.environment.docker.base_image_registry.address = '<registry_url>' # not required if the base_image is in Docker hub\n",
     "# # run_config.environment.docker.base_image_registry.username = '<user_name>' # needed only for private images\n",
     "# # run_config.environment.docker.base_image_registry.password = '<password>' # needed only for private images\n",
@@ -532,9 +525,9 @@
   }
  ],
  "kernelspec": {
-   "display_name": "Python 3.6",
+   "display_name": "Python 3.8 - AzureML",
    "language": "python",
-   "name": "python36"
+   "name": "python38-azureml"
  },
  "language_info": {
   "codemirror_mode": {
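Both notebooks also switch their kernelspec from the `Python 3.6` kernel to `Python 3.8 - AzureML`. A small sketch (assuming `jupyter_client` is available, as it is wherever Jupyter runs) to confirm the target kernel is registered on the machine executing the notebooks:

```python
# List registered Jupyter kernels; "python38-azureml" should appear on an
# Azure ML compute instance.
from jupyter_client.kernelspec import KernelSpecManager

specs = KernelSpecManager().find_kernel_specs()  # {kernel_name: resource_dir}
print(sorted(specs))
```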
Deleted data file
@@ -1,500 +0,0 @@
(500 removed rows of whitespace-separated numeric data: a 0/1 label followed by 28 floating-point feature columns per row.)
|
||||
0 0.628 1.382 0.135 0.683 0.571 1.097 0.564 -0.950 2.173 0.617 -0.326 0.371 0.000 1.093 0.918 1.667 2.548 0.460 1.221 0.708 0.000 0.743 0.861 0.975 1.067 1.007 0.843 0.762
|
||||
0 4.357 0.816 -1.609 1.845 -1.288 3.292 0.726 0.324 2.173 1.528 0.583 -0.801 2.215 0.605 0.572 1.406 0.000 0.794 -0.791 0.122 0.000 0.967 1.132 1.124 3.602 2.811 2.460 1.861
|
||||
0 0.677 -1.265 1.559 0.866 -0.618 0.823 0.260 0.185 0.000 1.133 0.337 1.589 2.215 0.563 -0.830 0.510 0.000 0.777 0.117 -0.941 3.102 0.839 0.763 0.986 1.182 0.649 0.796 0.851
|
||||
0 2.466 -1.838 -1.648 1.717 1.533 1.676 -1.553 -0.109 2.173 0.670 -0.666 0.284 0.000 0.334 -2.480 0.316 0.000 0.366 -0.804 -1.298 3.102 0.875 0.894 0.997 0.548 0.770 1.302 1.079
|
||||
1 1.403 0.129 -1.307 0.688 0.306 0.579 0.753 0.814 1.087 0.474 0.694 -1.400 0.000 0.520 1.995 0.185 0.000 0.929 -0.504 1.270 3.102 0.972 0.998 1.353 0.948 0.650 0.688 0.724
|
||||
1 0.351 1.188 -0.360 0.254 -0.346 1.129 0.545 1.691 0.000 0.652 -0.039 -0.258 2.215 1.089 0.655 0.472 2.548 0.554 -0.493 1.366 0.000 0.808 1.045 0.992 0.570 0.649 0.809 0.744
|
||||
0 1.875 -0.013 -0.128 0.236 1.163 0.902 0.426 0.590 2.173 1.251 -1.210 -0.616 0.000 1.035 1.534 0.912 0.000 1.944 1.789 -1.691 0.000 0.974 1.113 0.990 0.925 1.120 0.956 0.912
|
||||
0 0.298 0.750 -0.507 1.555 1.463 0.804 1.200 -0.665 0.000 0.439 -0.829 -0.252 1.107 0.770 -1.090 0.947 2.548 1.165 -0.166 -0.763 0.000 1.140 0.997 0.988 1.330 0.555 1.005 1.012
|
||||
0 0.647 0.342 0.245 4.340 -0.157 2.229 0.068 1.170 2.173 2.133 -0.201 -1.441 0.000 1.467 0.697 -0.532 1.274 1.457 0.583 -1.640 0.000 0.875 1.417 0.976 2.512 2.390 1.794 1.665
|
||||
1 1.731 -0.803 -1.013 1.492 -0.020 1.646 -0.541 1.121 2.173 0.459 -1.251 -1.495 2.215 0.605 -1.711 -0.232 0.000 0.658 0.634 -0.068 0.000 1.214 0.886 1.738 1.833 1.024 1.192 1.034
|
||||
0 0.515 1.416 -1.089 1.697 1.426 1.414 0.941 0.027 0.000 1.480 0.133 -1.595 2.215 1.110 0.752 0.760 2.548 1.062 0.697 -0.492 0.000 0.851 0.955 0.994 1.105 1.255 1.175 1.095
|
||||
0 1.261 0.858 1.465 0.757 0.305 2.310 0.679 1.080 2.173 1.544 2.518 -0.464 0.000 2.326 0.270 -0.841 0.000 2.163 0.839 -0.500 3.102 0.715 0.825 1.170 0.980 2.371 1.527 1.221
|
||||
1 1.445 1.509 1.471 0.414 -1.285 0.767 0.864 -0.677 2.173 0.524 1.388 0.171 0.000 0.826 0.190 0.121 2.548 0.572 1.691 -1.603 0.000 0.870 0.935 0.994 0.968 0.735 0.783 0.777
|
||||
1 0.919 -0.264 -1.245 0.681 -1.722 1.022 1.010 0.097 2.173 0.685 0.403 -1.351 0.000 1.357 -0.429 1.262 1.274 0.687 1.021 -0.563 0.000 0.953 0.796 0.991 0.873 1.749 1.056 0.917
|
||||
1 0.293 -2.258 -1.427 1.191 1.202 0.394 -2.030 1.438 0.000 0.723 0.596 -0.024 2.215 0.525 -1.678 -0.290 0.000 0.788 -0.824 -1.029 3.102 0.821 0.626 0.976 1.080 0.810 0.842 0.771
|
||||
0 3.286 0.386 1.688 1.619 -1.620 1.392 -0.009 0.280 0.000 1.179 -0.776 -0.110 2.215 1.256 0.248 -1.114 2.548 0.777 0.825 -0.156 0.000 1.026 1.065 0.964 0.909 1.249 1.384 1.395
|
||||
1 1.075 0.603 0.561 0.656 -0.685 0.985 0.175 0.979 2.173 1.154 0.584 -0.886 0.000 1.084 -0.354 -1.004 2.548 0.865 1.224 1.269 0.000 1.346 1.073 1.048 0.873 1.310 1.003 0.865
|
||||
1 1.098 -0.091 1.466 1.558 0.915 0.649 1.314 -1.182 2.173 0.791 0.073 0.351 0.000 0.517 0.940 1.195 0.000 1.150 1.187 -0.692 3.102 0.866 0.822 0.980 1.311 0.394 1.119 0.890
|
||||
1 0.481 -1.042 0.148 1.135 -1.249 1.202 -0.344 0.308 1.087 0.779 -1.431 1.581 0.000 0.860 -0.860 -1.125 0.000 0.785 0.303 1.199 3.102 0.878 0.853 0.988 1.072 0.827 0.936 0.815
|
||||
0 1.348 0.497 0.318 0.806 0.976 1.393 -0.152 0.632 2.173 2.130 0.515 -1.054 0.000 0.908 0.062 -0.780 0.000 1.185 0.687 1.668 1.551 0.720 0.898 0.985 0.683 1.292 1.320 1.131
|
||||
0 2.677 -0.420 -1.685 1.828 1.433 2.040 -0.718 -0.039 0.000 0.400 -0.873 0.472 0.000 0.444 0.340 -0.830 2.548 0.431 0.768 -1.417 3.102 0.869 0.917 0.996 0.707 0.193 0.728 1.154
|
||||
1 1.300 0.586 -0.122 1.306 0.609 0.727 -0.556 -1.652 2.173 0.636 0.720 1.393 2.215 0.328 1.280 -0.390 0.000 0.386 0.752 -0.905 0.000 0.202 0.751 1.106 0.864 0.799 0.928 0.717
|
||||
0 0.637 -0.176 1.737 1.322 -0.414 0.702 -0.964 -0.680 0.000 1.054 -0.461 0.889 2.215 0.861 -0.267 0.225 0.000 1.910 -1.888 1.027 0.000 0.919 0.899 1.186 0.993 1.109 0.862 0.775
|
||||
1 0.723 -0.104 1.572 0.428 -0.840 0.655 0.544 1.401 2.173 1.522 -0.154 -0.452 2.215 0.996 0.190 0.273 0.000 1.906 -0.176 0.966 0.000 0.945 0.894 0.990 0.981 1.555 0.988 0.893
|
||||
0 2.016 -0.570 1.612 0.798 0.441 0.334 0.191 -0.909 0.000 0.939 0.146 0.021 2.215 0.553 -0.444 1.156 2.548 0.781 -1.545 -0.520 0.000 0.922 0.956 1.528 0.722 0.699 0.778 0.901
|
||||
0 1.352 -0.707 1.284 0.665 0.580 0.694 -1.040 -0.899 2.173 0.692 -2.048 0.029 0.000 0.545 -2.042 1.259 0.000 0.661 -0.808 -1.251 3.102 0.845 0.991 0.979 0.662 0.225 0.685 0.769
|
||||
1 1.057 -1.561 -0.411 0.952 -0.681 1.236 -1.107 1.045 2.173 1.288 -2.521 -0.521 0.000 1.361 -1.239 1.546 0.000 0.373 -1.540 0.028 0.000 0.794 0.782 0.987 0.889 0.832 0.972 0.828
|
||||
0 1.118 -0.017 -1.227 1.077 1.256 0.714 0.624 -0.811 0.000 0.800 0.704 0.387 1.107 0.604 0.234 0.986 0.000 1.306 -0.456 0.094 3.102 0.828 0.984 1.195 0.987 0.672 0.774 0.748
|
||||
1 0.602 2.201 0.212 0.119 0.182 0.474 2.130 1.270 0.000 0.370 2.088 -0.573 0.000 0.780 -0.725 -1.033 0.000 1.642 0.598 0.303 3.102 0.886 0.988 0.985 0.644 0.756 0.651 0.599
|
||||
0 1.677 -0.844 1.581 0.585 0.887 1.012 -2.315 0.752 0.000 1.077 0.748 -0.195 0.000 0.718 0.832 -1.337 1.274 1.181 -0.557 -1.006 3.102 1.018 1.247 0.988 0.908 0.651 1.311 1.120
|
||||
1 1.695 0.259 1.224 1.344 1.067 0.718 -1.752 -0.215 0.000 0.473 0.991 -0.993 0.000 0.891 1.285 -1.500 2.548 0.908 -0.131 0.288 0.000 0.945 0.824 0.979 1.009 0.951 0.934 0.833
|
||||
0 0.793 0.628 0.432 1.707 0.302 0.919 1.045 -0.784 0.000 1.472 0.175 -1.284 2.215 1.569 0.155 0.971 2.548 0.435 0.735 1.625 0.000 0.801 0.907 0.992 0.831 1.446 1.082 1.051
|
||||
1 0.537 -0.664 -0.244 1.104 1.272 1.154 0.394 1.633 0.000 1.527 0.963 0.559 2.215 1.744 0.650 -0.912 0.000 1.097 0.730 -0.368 3.102 1.953 1.319 1.045 1.309 0.869 1.196 1.126
|
||||
1 0.585 -1.469 1.005 0.749 -1.060 1.224 -0.717 -0.323 2.173 1.012 -0.201 1.268 0.000 0.359 -0.567 0.476 0.000 1.117 -1.124 1.557 3.102 0.636 1.281 0.986 0.616 1.289 0.890 0.881
|
||||
1 0.354 -1.517 0.667 2.534 -1.298 1.020 -0.375 1.254 0.000 1.119 -0.060 -1.538 2.215 1.059 -0.395 -0.140 0.000 2.609 0.199 -0.778 1.551 0.957 0.975 1.286 1.666 1.003 1.224 1.135
|
||||
1 0.691 -1.619 -1.380 0.361 1.727 1.493 -1.093 -0.289 0.000 1.447 -0.640 1.341 0.000 1.453 -0.617 -1.456 1.274 1.061 -1.481 -0.091 0.000 0.744 0.649 0.987 0.596 0.727 0.856 0.797
|
||||
0 1.336 1.293 -1.359 0.357 0.067 1.110 -0.058 -0.515 0.000 0.976 1.498 1.207 0.000 1.133 0.437 1.053 2.548 0.543 1.374 0.171 0.000 0.764 0.761 0.984 0.827 0.553 0.607 0.612
|
||||
0 0.417 -1.111 1.661 2.209 -0.683 1.931 -0.642 0.959 1.087 1.514 -2.032 -0.686 0.000 1.521 -0.539 1.344 0.000 0.978 -0.866 0.363 1.551 2.813 1.850 1.140 1.854 0.799 1.600 1.556
|
||||
0 1.058 0.390 -0.591 0.134 1.149 0.346 -1.550 0.186 0.000 1.108 -0.999 0.843 1.107 1.124 0.415 -1.514 0.000 1.067 -0.426 -1.000 3.102 1.744 1.050 0.985 1.006 1.010 0.883 0.789
|
||||
1 1.655 0.253 1.216 0.270 1.703 0.500 -0.006 -1.418 2.173 0.690 -0.350 0.170 2.215 1.045 -0.924 -0.774 0.000 0.996 -0.745 -0.123 0.000 0.839 0.820 0.993 0.921 0.869 0.725 0.708
|
||||
0 1.603 -0.850 0.564 0.829 0.093 1.270 -1.113 -1.155 2.173 0.853 -1.021 1.248 2.215 0.617 -1.270 1.733 0.000 0.935 -0.092 0.136 0.000 1.011 1.074 0.977 0.823 1.269 1.054 0.878
|
||||
0 1.568 -0.792 1.005 0.545 0.896 0.895 -1.698 -0.988 0.000 0.608 -1.634 1.705 0.000 0.826 0.208 0.618 1.274 2.063 -1.743 -0.520 0.000 0.939 0.986 0.990 0.600 0.435 1.033 1.087
|
||||
0 0.489 -1.335 -1.102 1.738 1.028 0.628 -0.992 -0.627 0.000 0.652 -0.064 -0.215 0.000 1.072 0.173 -1.251 2.548 1.042 0.057 0.841 3.102 0.823 0.895 1.200 1.164 0.770 0.837 0.846
|
||||
1 1.876 0.870 1.234 0.556 -1.262 1.764 0.855 -0.467 2.173 1.079 1.351 0.852 0.000 0.773 0.383 0.874 0.000 1.292 0.829 -1.228 3.102 0.707 0.969 1.102 1.601 1.017 1.112 1.028
|
||||
0 1.033 0.407 -0.374 0.705 -1.254 0.690 -0.231 1.502 2.173 0.433 -2.009 -0.057 0.000 0.861 1.151 0.334 0.000 0.960 -0.839 1.299 3.102 2.411 1.480 0.982 0.995 0.377 1.012 0.994
|
||||
0 1.092 0.653 -0.801 0.463 0.426 0.529 -1.055 0.040 0.000 0.663 0.999 1.255 1.107 0.749 -1.106 1.185 2.548 0.841 -0.745 -1.029 0.000 0.841 0.743 0.988 0.750 1.028 0.831 0.868
|
||||
1 0.799 -0.285 -0.011 0.531 1.392 1.063 0.854 0.494 2.173 1.187 -1.065 -0.851 0.000 0.429 -0.296 1.072 0.000 0.942 -1.985 1.172 0.000 0.873 0.693 0.992 0.819 0.689 1.131 0.913
|
||||
0 0.503 1.973 -0.377 1.515 -1.514 0.708 1.081 -0.313 2.173 1.110 -0.417 0.839 0.000 0.712 -1.153 1.165 0.000 0.675 -0.303 -0.930 1.551 0.709 0.761 1.032 0.986 0.698 0.963 1.291
|
||||
0 0.690 -0.574 -1.608 1.182 1.118 0.557 -2.243 0.144 0.000 0.969 0.216 -1.383 1.107 1.054 0.888 -0.709 2.548 0.566 1.663 -0.550 0.000 0.752 1.528 0.987 1.408 0.740 1.290 1.123
|
||||
1 0.890 1.501 0.786 0.779 -0.615 1.126 0.716 1.541 2.173 0.887 0.728 -0.673 2.215 1.216 0.332 -0.020 0.000 0.965 1.828 0.101 0.000 0.827 0.715 1.099 1.088 1.339 0.924 0.878
|
||||
0 0.566 0.883 0.655 1.600 0.034 1.155 2.028 -1.499 0.000 0.723 -0.871 0.763 0.000 1.286 -0.696 -0.676 2.548 1.134 -0.113 1.207 3.102 4.366 2.493 0.984 0.960 0.962 1.843 1.511
|
||||
0 1.146 1.086 -0.911 0.838 1.298 0.821 0.127 -0.145 0.000 1.352 0.474 -1.580 2.215 1.619 -0.081 0.675 2.548 1.382 -0.748 0.127 0.000 0.958 0.976 1.239 0.876 1.481 1.116 1.076
|
||||
0 1.739 -0.326 -1.661 0.420 -1.705 1.193 -0.031 -1.212 2.173 1.783 -0.442 0.522 0.000 1.064 -0.692 0.027 0.000 1.314 0.359 -0.037 3.102 0.968 0.897 0.986 0.907 1.196 1.175 1.112
|
||||
1 0.669 0.194 -0.703 0.657 -0.260 0.899 -2.511 0.311 0.000 1.482 0.773 0.974 2.215 3.459 0.037 -1.299 1.274 2.113 0.067 1.516 0.000 0.740 0.871 0.979 1.361 2.330 1.322 1.046
|
||||
1 1.355 -1.033 -1.173 0.552 -0.048 0.899 -0.482 -1.287 2.173 1.422 -1.227 0.390 1.107 1.937 -0.028 0.914 0.000 0.849 -0.230 -1.734 0.000 0.986 1.224 1.017 1.051 1.788 1.150 1.009
|
||||
1 0.511 -0.202 1.029 0.780 1.154 0.816 0.532 -0.731 0.000 0.757 0.517 0.749 2.215 1.302 0.289 -1.188 0.000 0.584 1.211 -0.350 0.000 0.876 0.943 0.995 0.963 0.256 0.808 0.891
|
||||
1 1.109 0.572 1.484 0.753 1.543 1.711 -0.145 -0.746 1.087 1.759 0.631 0.845 2.215 0.945 0.542 0.003 0.000 0.378 -1.150 -0.044 0.000 0.764 1.042 0.992 1.045 2.736 1.441 1.140
|
||||
0 0.712 -0.025 0.553 0.928 -0.711 1.304 0.045 -0.300 0.000 0.477 0.720 0.969 0.000 1.727 -0.474 1.328 1.274 1.282 2.222 1.684 0.000 0.819 0.765 1.023 0.961 0.657 0.799 0.744
|
||||
1 1.131 -0.302 1.079 0.901 0.236 0.904 -0.249 1.694 2.173 1.507 -0.702 -1.128 0.000 0.774 0.565 0.284 2.548 1.802 1.446 -0.192 0.000 3.720 2.108 0.986 0.930 1.101 1.484 1.238
|
||||
0 1.392 1.253 0.118 0.864 -1.358 0.922 -0.447 -1.243 1.087 1.969 1.031 0.774 2.215 1.333 -0.359 -0.681 0.000 1.099 -0.257 1.473 0.000 1.246 0.909 1.475 1.234 2.531 1.449 1.306
|
||||
0 1.374 2.291 -0.479 1.339 -0.243 0.687 2.345 1.310 0.000 0.467 1.081 0.772 0.000 0.656 1.155 -1.636 2.548 0.592 0.536 -1.269 3.102 0.981 0.821 1.010 0.877 0.217 0.638 0.758
|
||||
1 0.401 -1.516 0.909 2.738 0.519 0.887 0.566 -1.202 0.000 0.909 -0.176 1.682 0.000 2.149 -0.878 -0.514 2.548 0.929 -0.563 -1.555 3.102 1.228 0.803 0.980 1.382 0.884 1.025 1.172
|
||||
1 0.430 -1.589 1.417 2.158 1.226 1.180 -0.829 -0.781 2.173 0.798 1.400 -0.111 0.000 0.939 -0.878 1.076 2.548 0.576 1.335 -0.826 0.000 0.861 0.970 0.982 1.489 1.308 1.015 0.992
|
||||
1 1.943 -0.391 -0.840 0.621 -1.613 2.026 1.734 1.025 0.000 0.930 0.573 -0.912 0.000 1.326 0.847 -0.220 1.274 1.181 0.079 0.709 3.102 1.164 1.007 0.987 1.094 0.821 0.857 0.786
|
||||
1 0.499 0.436 0.887 0.859 1.509 0.733 -0.559 1.111 1.087 1.011 -0.796 0.279 2.215 1.472 -0.510 -0.982 0.000 1.952 0.379 -0.733 0.000 1.076 1.358 0.991 0.589 0.879 1.068 0.922
|
||||
0 0.998 -0.407 -1.711 0.139 0.652 0.810 -0.331 -0.721 0.000 0.471 -0.533 0.442 0.000 0.531 -1.405 0.120 2.548 0.707 0.098 -1.176 1.551 1.145 0.809 0.988 0.529 0.612 0.562 0.609
|
||||
1 1.482 0.872 0.638 1.288 0.362 0.856 0.900 -0.511 1.087 1.072 1.061 -1.432 2.215 1.770 -2.292 -1.547 0.000 1.131 1.374 0.783 0.000 6.316 4.381 1.002 1.317 1.048 2.903 2.351
|
||||
1 2.084 -0.422 1.289 1.125 0.735 1.104 -0.518 -0.326 2.173 0.413 -0.719 -0.699 0.000 0.857 0.108 -1.631 0.000 0.527 0.641 -1.362 3.102 0.791 0.952 1.016 0.776 0.856 0.987 0.836
|
||||
0 0.464 0.674 0.025 0.430 -1.703 0.982 -1.311 -0.808 2.173 1.875 1.060 0.821 2.215 0.954 -0.480 -1.677 0.000 0.567 0.702 -0.939 0.000 0.781 1.076 0.989 1.256 3.632 1.652 1.252
|
||||
1 0.457 -1.944 -1.010 1.409 0.931 1.098 -0.742 -0.415 0.000 1.537 -0.834 0.945 2.215 1.752 -0.287 -1.269 2.548 0.692 -1.537 -0.223 0.000 0.801 1.192 1.094 1.006 1.659 1.175 1.122
|
||||
0 3.260 -0.943 1.737 0.920 1.309 0.946 -0.139 -0.271 2.173 0.994 -0.952 -0.311 0.000 0.563 -0.136 -0.881 0.000 1.236 -0.507 0.906 1.551 0.747 0.869 0.985 1.769 1.034 1.179 1.042
|
||||
0 0.615 -0.778 0.246 1.861 1.619 0.560 -0.943 -0.204 2.173 0.550 -0.759 -1.342 2.215 0.578 0.076 -0.973 0.000 0.939 0.035 0.680 0.000 0.810 0.747 1.401 0.772 0.702 0.719 0.662
|
||||
1 2.370 -0.064 -0.237 1.737 0.154 2.319 -1.838 -1.673 0.000 1.053 -1.305 -0.075 0.000 0.925 0.149 0.318 1.274 0.851 -0.922 0.981 3.102 0.919 0.940 0.989 0.612 0.598 1.219 1.626
|
||||
1 1.486 0.311 -1.262 1.354 -0.847 0.886 -0.158 1.213 2.173 1.160 -0.218 0.239 0.000 1.166 0.494 0.278 2.548 0.575 1.454 -1.701 0.000 0.429 1.129 0.983 1.111 1.049 1.006 0.920
|
||||
1 1.294 1.587 -0.864 0.487 -0.312 0.828 1.051 -0.031 1.087 2.443 1.216 1.609 2.215 1.167 0.813 0.921 0.000 1.751 -0.415 0.119 0.000 1.015 1.091 0.974 1.357 2.093 1.178 1.059
|
||||
1 0.984 0.465 -1.661 0.379 -0.554 0.977 0.237 0.365 0.000 0.510 0.143 1.101 0.000 1.099 -0.662 -1.593 2.548 1.104 -0.197 -0.648 3.102 0.925 0.922 0.986 0.642 0.667 0.806 0.722
|
||||
1 0.930 -0.009 0.047 0.667 1.367 1.065 -0.231 0.815 0.000 1.199 -1.114 -0.877 2.215 0.940 0.824 -1.583 0.000 1.052 -0.407 -0.076 1.551 1.843 1.257 1.013 1.047 0.751 1.158 0.941
|
||||
0 0.767 -0.011 -0.637 0.341 -1.437 1.438 -0.425 -0.450 2.173 1.073 -0.718 1.341 2.215 0.633 -1.394 0.486 0.000 0.603 -1.945 -1.626 0.000 0.703 0.790 0.984 1.111 1.848 1.129 1.072
|
||||
1 1.779 0.017 0.432 0.402 1.022 0.959 1.480 1.595 2.173 1.252 1.365 0.006 0.000 1.188 -0.174 -1.107 0.000 1.181 0.518 -0.258 0.000 1.057 0.910 0.991 1.616 0.779 1.158 1.053
|
||||
0 0.881 0.630 1.029 1.990 0.508 1.102 0.742 -1.298 2.173 1.565 1.085 0.686 2.215 2.691 1.391 -0.904 0.000 0.499 1.388 -1.199 0.000 0.347 0.861 0.997 0.881 1.920 1.233 1.310
|
||||
0 1.754 -0.266 0.389 0.347 -0.030 0.462 -1.408 -0.957 2.173 0.515 -2.341 -1.700 0.000 0.588 -0.797 1.355 2.548 0.608 0.329 -1.389 0.000 1.406 0.909 0.988 0.760 0.593 0.768 0.847
|
||||
0 1.087 0.311 -1.447 0.173 0.567 0.854 0.362 0.584 0.000 1.416 -0.716 -1.211 2.215 0.648 -0.358 -0.692 1.274 0.867 -0.513 0.206 0.000 0.803 0.813 0.984 1.110 0.491 0.921 0.873
|
||||
0 0.279 1.114 -1.190 3.004 -0.738 1.233 0.896 1.092 2.173 0.454 -0.374 0.117 2.215 0.357 0.119 1.270 0.000 0.458 1.343 0.316 0.000 0.495 0.540 0.988 1.715 1.139 1.618 1.183
|
||||
1 1.773 -0.694 -1.518 2.306 -1.200 3.104 0.749 0.362 0.000 1.871 0.230 -1.686 2.215 0.805 -0.179 -0.871 1.274 0.910 0.607 -0.246 0.000 1.338 1.598 0.984 1.050 0.919 1.678 1.807
|
||||
0 0.553 0.683 0.827 0.973 -0.706 1.488 0.149 1.140 2.173 1.788 0.447 -0.478 0.000 0.596 1.043 1.607 0.000 0.373 -0.868 -1.308 1.551 1.607 1.026 0.998 1.134 0.808 1.142 0.936
|
||||
1 0.397 1.101 -1.139 1.688 0.146 0.972 0.541 1.518 0.000 1.549 -0.873 -1.012 0.000 2.282 -0.151 0.314 2.548 1.174 0.033 -1.368 0.000 0.937 0.776 1.039 1.143 0.959 0.986 1.013
|
||||
1 0.840 1.906 -0.959 0.869 0.576 0.642 0.554 -1.351 0.000 0.756 0.923 -0.823 2.215 1.251 1.130 0.545 2.548 1.513 0.410 1.073 0.000 1.231 0.985 1.163 0.812 0.987 0.816 0.822
|
||||
1 0.477 1.665 0.814 0.763 -0.382 0.828 -0.008 0.280 2.173 1.213 -0.001 1.560 0.000 1.136 0.311 -1.289 0.000 0.797 1.091 -0.616 3.102 1.026 0.964 0.992 0.772 0.869 0.916 0.803
|
||||
0 2.655 0.020 0.273 1.464 0.482 1.709 -0.107 -1.456 2.173 0.825 0.141 -0.386 0.000 1.342 -0.592 1.635 1.274 0.859 -0.175 -0.874 0.000 0.829 0.946 1.003 2.179 0.836 1.505 1.176
|
||||
0 0.771 -1.992 -0.720 0.732 -1.464 0.869 -1.290 0.388 2.173 0.926 -1.072 -1.489 2.215 0.640 -1.232 0.840 0.000 0.528 -2.440 -0.446 0.000 0.811 0.868 0.993 0.995 1.317 0.809 0.714
|
||||
0 1.357 1.302 0.076 0.283 -1.060 0.783 1.559 -0.994 0.000 0.947 1.212 1.617 0.000 1.127 0.311 0.442 2.548 0.582 -0.052 1.186 1.551 1.330 0.995 0.985 0.846 0.404 0.858 0.815
|
||||
0 0.442 -0.381 -0.424 1.244 0.591 0.731 0.605 -0.713 2.173 0.629 2.762 1.040 0.000 0.476 2.693 -0.617 0.000 0.399 0.442 1.486 3.102 0.839 0.755 0.988 0.869 0.524 0.877 0.918
|
||||
0 0.884 0.422 0.055 0.818 0.624 0.950 -0.763 1.624 0.000 0.818 -0.609 -1.166 0.000 1.057 -0.528 1.070 2.548 1.691 -0.124 -0.335 3.102 1.104 0.933 0.985 0.913 1.000 0.863 1.056
|
||||
0 1.276 0.156 1.714 1.053 -1.189 0.672 -0.464 -0.030 2.173 0.469 -2.483 0.442 0.000 0.564 2.580 -0.253 0.000 0.444 -0.628 1.080 1.551 5.832 2.983 0.985 1.162 0.494 1.809 1.513
|
||||
0 1.106 -0.556 0.406 0.573 -1.400 0.769 -0.518 1.457 2.173 0.743 -0.352 -0.010 0.000 1.469 -0.550 -0.930 2.548 0.540 1.236 -0.571 0.000 0.962 0.970 1.101 0.805 1.107 0.873 0.773
|
||||
0 0.539 -0.964 -0.464 1.371 -1.606 0.667 -0.160 0.655 0.000 0.952 0.352 -0.740 2.215 0.952 0.007 1.123 0.000 1.061 -0.505 1.389 3.102 1.063 0.991 1.019 0.633 0.967 0.732 0.799
|
||||
1 0.533 -0.989 -1.608 0.462 -1.723 1.204 -0.598 -0.098 2.173 1.343 -0.460 1.632 2.215 0.577 0.221 -0.492 0.000 0.628 -0.073 0.472 0.000 0.518 0.880 0.988 1.179 1.874 1.041 0.813
|
||||
1 1.024 1.075 -0.795 0.286 -1.436 1.365 0.857 -0.309 2.173 0.804 1.532 1.435 0.000 1.511 0.722 1.494 0.000 1.778 0.903 0.753 1.551 0.686 0.810 0.999 0.900 1.360 1.133 0.978
|
||||
1 2.085 -0.269 -1.423 0.789 1.298 0.281 1.652 0.187 0.000 0.658 -0.760 -0.042 2.215 0.663 0.024 0.120 0.000 0.552 -0.299 -0.428 3.102 0.713 0.811 1.130 0.705 0.218 0.675 0.743
|
||||
1 0.980 -0.443 0.813 0.785 -1.253 0.719 0.448 -1.458 0.000 1.087 0.595 0.635 1.107 1.428 0.029 -0.995 0.000 1.083 1.562 -0.092 0.000 0.834 0.891 1.165 0.967 0.661 0.880 0.817
|
||||
1 0.903 -0.733 -0.980 0.634 -0.639 0.780 0.266 -0.287 2.173 1.264 -0.936 1.004 0.000 1.002 -0.056 -1.344 2.548 1.183 -0.098 1.169 0.000 0.733 1.002 0.985 0.711 0.916 0.966 0.875
|
||||
0 0.734 -0.304 -1.175 2.851 1.674 0.904 -0.634 0.412 2.173 1.363 -1.050 -0.282 0.000 1.476 -1.603 0.103 0.000 2.231 -0.718 1.708 3.102 0.813 0.896 1.088 0.686 1.392 1.033 1.078
|
||||
1 1.680 0.591 -0.243 0.111 -0.478 0.326 -0.079 -1.555 2.173 0.711 0.714 0.922 2.215 0.355 0.858 1.682 0.000 0.727 1.620 1.360 0.000 0.334 0.526 1.001 0.862 0.633 0.660 0.619
|
||||
1 1.163 0.225 -0.202 0.501 -0.979 1.609 -0.938 1.424 0.000 1.224 -0.118 -1.274 0.000 2.034 1.241 -0.254 0.000 1.765 0.536 0.237 3.102 0.894 0.838 0.988 0.693 0.579 0.762 0.726
|
||||
0 1.223 1.232 1.471 0.489 1.728 0.703 -0.111 0.411 0.000 1.367 1.014 -1.294 1.107 1.524 -0.414 -0.164 2.548 1.292 0.833 0.316 0.000 0.861 0.752 0.994 0.836 1.814 1.089 0.950
|
||||
0 0.816 1.637 -1.557 1.036 -0.342 0.913 1.333 0.949 2.173 0.812 0.756 -0.628 2.215 1.333 0.470 1.495 0.000 1.204 -2.222 -1.675 0.000 1.013 0.924 1.133 0.758 1.304 0.855 0.860
|
||||
0 0.851 -0.564 -0.691 0.692 1.345 1.219 1.014 0.318 0.000 1.422 -0.262 -1.635 2.215 0.531 1.802 0.008 0.000 0.508 0.515 -1.267 3.102 0.821 0.787 1.026 0.783 0.432 1.149 1.034
|
||||
0 0.800 -0.599 0.204 0.552 -0.484 0.974 0.413 0.961 2.173 1.269 -0.984 -1.039 2.215 0.380 -1.213 1.371 0.000 0.551 0.332 -0.659 0.000 0.694 0.852 0.984 1.057 2.037 1.096 0.846
|
||||
0 0.744 -0.071 -0.255 0.638 0.512 1.125 0.407 0.844 2.173 0.860 -0.481 -0.677 0.000 1.102 0.181 -1.194 0.000 1.011 -1.081 -1.713 3.102 0.854 0.862 0.982 1.111 1.372 1.042 0.920
|
||||
1 0.400 1.049 -0.625 0.880 -0.407 1.040 2.150 -1.359 0.000 0.747 -0.144 0.847 2.215 0.560 -1.829 0.698 0.000 1.663 -0.668 0.267 0.000 0.845 0.964 0.996 0.820 0.789 0.668 0.668
|
||||
0 1.659 -0.705 -1.057 1.803 -1.436 1.008 0.693 0.005 0.000 0.895 -0.007 0.681 1.107 1.085 0.125 1.476 2.548 1.214 1.068 0.486 0.000 0.867 0.919 0.986 1.069 0.692 1.026 1.313
|
||||
0 0.829 -0.153 0.861 0.615 -0.548 0.589 1.077 -0.041 2.173 1.056 0.763 -1.737 0.000 0.639 0.970 0.725 0.000 0.955 1.227 -0.799 3.102 1.020 1.024 0.985 0.750 0.525 0.685 0.671
|
||||
1 0.920 -0.806 -0.840 1.048 0.278 0.973 -0.077 -1.364 2.173 1.029 0.309 0.133 0.000 1.444 1.484 1.618 1.274 1.419 -0.482 0.417 0.000 0.831 1.430 1.151 1.829 1.560 1.343 1.224
|
||||
1 0.686 0.249 -0.905 0.343 -1.731 0.724 -2.823 -0.901 0.000 0.982 0.303 1.312 1.107 1.016 0.245 0.610 0.000 1.303 -0.557 -0.360 3.102 1.384 1.030 0.984 0.862 1.144 0.866 0.779
|
||||
0 1.603 0.444 0.508 0.586 0.401 0.610 0.467 -1.735 2.173 0.914 0.626 -1.019 0.000 0.812 0.422 -0.408 2.548 0.902 1.679 1.490 0.000 1.265 0.929 0.990 1.004 0.816 0.753 0.851
|
||||
1 0.623 0.780 -0.203 0.056 0.015 0.899 0.793 1.326 1.087 0.803 1.478 -1.499 2.215 1.561 1.492 -0.120 0.000 0.904 0.795 0.137 0.000 0.548 1.009 0.850 0.924 0.838 0.914 0.860
|
||||
0 1.654 -2.032 -1.160 0.859 -1.583 0.689 -1.965 0.891 0.000 0.646 -1.014 -0.288 2.215 0.630 -0.815 0.402 0.000 0.638 0.316 0.655 3.102 0.845 0.879 0.993 1.067 0.625 1.041 0.958
|
||||
1 0.828 -1.269 -1.203 0.744 -0.213 0.626 -1.017 -0.404 0.000 1.281 -0.931 1.733 2.215 0.699 -0.351 1.287 0.000 1.251 -1.171 0.197 0.000 0.976 1.186 0.987 0.646 0.655 0.733 0.671
|
||||
1 0.677 0.111 1.090 1.580 1.591 1.560 0.654 -0.341 2.173 0.794 -0.266 0.702 0.000 0.823 0.651 -1.239 2.548 0.730 1.467 -1.530 0.000 1.492 1.023 0.983 1.909 1.022 1.265 1.127
|
||||
1 0.736 0.882 -1.060 0.589 0.168 1.663 0.781 1.022 2.173 2.025 1.648 -1.292 0.000 1.240 0.924 -0.421 1.274 1.354 0.065 0.501 0.000 0.316 0.925 0.988 0.664 1.736 0.992 0.807
|
||||
1 1.040 -0.822 1.638 0.974 -0.674 0.393 0.830 0.011 2.173 0.770 -0.140 -0.402 0.000 0.294 -0.133 0.030 0.000 1.220 0.807 0.638 0.000 0.826 1.063 1.216 1.026 0.705 0.934 0.823
|
||||
1 0.711 0.602 0.048 1.145 0.966 0.934 0.263 -1.589 2.173 0.971 -0.496 -0.421 1.107 0.628 -0.865 0.845 0.000 0.661 -0.008 -0.565 0.000 0.893 0.705 0.988 0.998 1.339 0.908 0.872
|
||||
1 0.953 -1.651 -0.167 0.885 1.053 1.013 -1.239 0.133 0.000 1.884 -1.122 1.222 2.215 1.906 -0.860 -1.184 1.274 1.413 -0.668 -1.647 0.000 1.873 1.510 1.133 1.050 1.678 1.246 1.061
|
||||
1 0.986 -0.892 -1.380 0.917 1.134 0.950 -1.162 -0.469 0.000 0.569 -1.393 0.215 0.000 0.320 2.667 1.712 0.000 1.570 -0.375 1.457 3.102 0.925 1.128 1.011 0.598 0.824 0.913 0.833
|
||||
1 1.067 0.099 1.154 0.527 -0.789 1.085 0.623 -1.602 2.173 1.511 -0.230 0.022 2.215 0.269 -0.377 0.883 0.000 0.571 -0.540 -0.512 0.000 0.414 0.803 1.022 0.959 2.053 1.041 0.780
|
||||
0 0.825 -2.118 0.217 1.453 -0.493 0.819 0.313 -0.942 0.000 2.098 -0.725 1.096 2.215 0.484 1.336 1.458 0.000 0.482 0.100 1.163 0.000 0.913 0.536 0.990 1.679 0.957 1.095 1.143
|
||||
1 1.507 0.054 1.120 0.698 -1.340 0.912 0.384 0.015 1.087 0.720 0.247 -0.820 0.000 0.286 0.154 1.578 2.548 0.629 1.582 -0.576 0.000 0.828 0.893 1.136 0.514 0.632 0.699 0.709
|
||||
1 0.610 1.180 -0.993 0.816 0.301 0.932 0.758 1.539 0.000 0.726 -0.830 0.248 2.215 0.883 0.857 -1.305 0.000 1.338 1.009 -0.252 3.102 0.901 1.074 0.987 0.875 1.159 1.035 0.858
|
||||
1 1.247 -1.360 1.502 1.525 -1.332 0.618 1.063 0.755 0.000 0.582 -0.155 0.473 2.215 1.214 -0.422 -0.551 2.548 0.838 -1.171 -1.166 0.000 2.051 1.215 1.062 1.091 0.725 0.896 1.091
|
||||
0 0.373 -0.600 1.291 2.573 0.207 0.765 -0.209 1.667 0.000 0.668 0.724 -1.499 0.000 1.045 -0.338 -0.754 2.548 0.558 -0.469 0.029 3.102 0.868 0.939 1.124 0.519 0.383 0.636 0.838
|
||||
0 0.791 0.336 -0.307 0.494 1.213 1.158 0.336 1.081 2.173 0.918 1.289 -0.449 0.000 0.735 -0.521 -0.969 0.000 1.052 0.499 -1.188 3.102 0.699 1.013 0.987 0.622 1.050 0.712 0.661
|
||||
0 1.321 0.856 0.464 0.202 0.901 1.144 0.120 -1.651 0.000 0.803 0.577 -0.509 2.215 0.695 -0.114 0.423 2.548 0.621 1.852 -0.420 0.000 0.697 0.964 0.983 0.527 0.659 0.719 0.729
|
||||
0 0.563 2.081 0.913 0.982 -0.533 0.549 -0.481 -1.730 0.000 0.962 0.921 0.569 2.215 0.731 1.184 -0.679 1.274 0.918 0.931 -1.432 0.000 1.008 0.919 0.993 0.895 0.819 0.810 0.878
|
||||
1 1.148 0.345 0.953 0.921 0.617 0.991 1.103 -0.484 0.000 0.970 1.978 1.525 0.000 1.150 0.689 -0.757 2.548 0.517 0.995 1.245 0.000 1.093 1.140 0.998 1.006 0.756 0.864 0.838
|
||||
1 1.400 0.128 -1.695 1.169 1.070 1.094 -0.345 -0.249 0.000 1.224 0.364 -0.036 2.215 1.178 0.530 -1.544 0.000 1.334 0.933 1.604 0.000 0.560 1.267 1.073 0.716 0.780 0.832 0.792
|
||||
0 0.330 -2.133 1.403 0.628 0.379 1.686 -0.995 0.030 1.087 2.071 0.127 -0.457 0.000 4.662 -0.855 1.477 0.000 2.072 -0.917 -1.416 3.102 5.403 3.074 0.977 0.936 1.910 2.325 1.702
|
||||
0 0.989 0.473 0.968 1.970 1.368 0.844 0.574 -0.290 2.173 0.866 -0.345 -1.019 0.000 1.130 0.605 -0.752 0.000 0.956 -0.888 0.870 3.102 0.885 0.886 0.982 1.157 1.201 1.100 1.068
|
||||
1 0.773 0.418 0.753 1.388 1.070 1.104 -0.378 -0.758 0.000 1.027 0.397 -0.496 2.215 1.234 0.027 1.084 2.548 0.936 0.209 1.677 0.000 1.355 1.020 0.983 0.550 1.206 0.916 0.931
|
||||
0 0.319 2.015 1.534 0.570 -1.134 0.632 0.124 0.757 0.000 0.477 0.598 -1.109 1.107 0.449 0.438 -0.755 2.548 0.574 -0.659 0.691 0.000 0.440 0.749 0.985 0.517 0.158 0.505 0.522
|
||||
0 1.215 1.453 -1.386 1.276 1.298 0.643 0.570 -0.196 2.173 0.588 2.104 0.498 0.000 0.617 -0.296 -0.801 2.548 0.452 0.110 0.313 0.000 0.815 0.953 1.141 1.166 0.547 0.892 0.807
|
||||
1 1.257 -1.869 -0.060 0.265 0.653 1.527 -0.346 1.163 2.173 0.758 -2.119 -0.604 0.000 1.473 -1.133 -1.290 2.548 0.477 -0.428 -0.066 0.000 0.818 0.841 0.984 1.446 1.729 1.211 1.054
|
||||
1 1.449 0.464 1.585 1.418 -1.488 1.540 0.942 0.087 0.000 0.898 0.402 -0.631 2.215 0.753 0.039 -1.729 0.000 0.859 0.849 -1.054 0.000 0.791 0.677 0.995 0.687 0.527 0.703 0.606
|
||||
1 1.084 -1.997 0.900 1.333 1.024 0.872 -0.864 -1.500 2.173 1.072 -0.813 -0.421 2.215 0.924 0.478 0.304 0.000 0.992 -0.398 -1.022 0.000 0.741 1.085 0.980 1.221 1.176 1.032 0.961
|
||||
0 1.712 1.129 0.125 1.120 -1.402 1.749 0.951 -1.575 2.173 1.711 0.445 0.578 0.000 1.114 0.234 -1.011 0.000 1.577 -0.088 0.086 3.102 2.108 1.312 1.882 1.597 2.009 1.441 1.308
|
||||
0 0.530 0.248 1.622 1.450 -1.012 1.221 -1.154 -0.763 2.173 1.698 -0.586 0.733 0.000 0.889 1.042 1.038 1.274 0.657 0.008 0.701 0.000 0.430 1.005 0.983 0.930 2.264 1.357 1.146
|
||||
1 0.921 1.735 0.883 0.699 -1.614 0.821 1.463 0.319 1.087 1.099 0.814 -1.600 2.215 1.375 0.702 -0.691 0.000 0.869 1.326 -0.790 0.000 0.980 0.900 0.988 0.832 1.452 0.816 0.709
|
||||
0 2.485 -0.823 -0.297 0.886 -1.404 0.989 0.835 1.615 2.173 0.382 0.588 -0.224 0.000 1.029 -0.456 1.546 2.548 0.613 -0.359 -0.789 0.000 0.768 0.977 1.726 2.007 0.913 1.338 1.180
|
||||
1 0.657 -0.069 -0.078 1.107 1.549 0.804 1.335 -1.630 2.173 1.271 0.481 0.153 1.107 1.028 0.144 -0.762 0.000 1.098 0.132 1.570 0.000 0.830 0.979 1.175 1.069 1.624 1.000 0.868
|
||||
1 2.032 0.329 -1.003 0.493 -0.136 1.159 -0.224 0.750 1.087 0.396 0.546 0.587 0.000 0.620 1.805 0.982 0.000 1.236 0.744 -1.621 0.000 0.930 1.200 0.988 0.482 0.771 0.887 0.779
|
||||
0 0.524 -1.319 0.634 0.471 1.221 0.599 -0.588 -0.461 0.000 1.230 -1.504 -1.517 1.107 1.436 -0.035 0.104 2.548 0.629 1.997 -1.282 0.000 2.084 1.450 0.984 1.084 1.827 1.547 1.213
|
||||
1 0.871 0.618 -1.544 0.718 0.186 1.041 -1.180 0.434 2.173 1.133 1.558 -1.301 0.000 0.452 -0.595 0.522 0.000 0.665 0.567 0.130 3.102 1.872 1.114 1.095 1.398 0.979 1.472 1.168
|
||||
1 3.308 1.037 -0.634 0.690 -0.619 1.975 0.949 1.280 0.000 0.826 0.546 -0.139 2.215 0.635 -0.045 0.427 0.000 1.224 0.112 1.339 3.102 1.756 1.050 0.992 0.738 0.903 0.968 1.238
|
||||
0 0.588 2.104 -0.872 1.136 1.743 0.842 0.638 0.015 0.000 0.481 0.928 1.000 2.215 0.595 0.125 1.429 0.000 0.951 -1.140 -0.511 3.102 1.031 1.057 0.979 0.673 1.064 1.001 0.891
|
||||
0 0.289 0.823 0.013 0.615 -1.601 0.177 2.403 -0.015 0.000 0.258 1.151 1.036 2.215 0.694 0.553 -1.326 2.548 0.411 0.366 0.106 0.000 0.482 0.562 0.989 0.670 0.404 0.516 0.561
|
||||
1 0.294 -0.660 -1.162 1.752 0.384 0.860 0.513 1.119 0.000 2.416 0.107 -1.342 0.000 1.398 0.361 -0.350 2.548 1.126 -0.902 0.040 1.551 0.650 1.125 0.988 0.531 0.843 0.912 0.911
|
||||
0 0.599 -0.616 1.526 1.381 0.507 0.955 -0.646 -0.085 2.173 0.775 -0.533 1.116 2.215 0.789 -0.136 -1.176 0.000 2.449 1.435 -1.433 0.000 1.692 1.699 1.000 0.869 1.119 1.508 1.303
|
||||
1 1.100 -1.174 -1.114 1.601 -1.576 1.056 -1.343 0.547 2.173 0.555 0.367 0.592 2.215 0.580 -1.862 -0.914 0.000 0.904 0.508 -0.444 0.000 1.439 1.105 0.986 1.408 1.104 1.190 1.094
|
||||
1 2.237 -0.701 1.470 0.719 -0.199 0.745 -0.132 -0.737 1.087 0.976 -0.227 0.093 2.215 0.699 0.057 1.133 0.000 0.661 0.573 -0.679 0.000 0.785 0.772 1.752 1.235 0.856 0.990 0.825
|
||||
1 0.455 -0.880 -1.482 1.260 -0.178 1.499 0.158 1.022 0.000 1.867 -0.435 -0.675 2.215 1.234 0.783 1.586 0.000 0.641 -0.454 -0.409 3.102 1.002 0.964 0.986 0.761 0.240 1.190 0.995
|
||||
1 1.158 -0.778 -0.159 0.823 1.641 1.341 -0.830 -1.169 2.173 0.840 -1.554 0.934 0.000 0.693 0.488 -1.218 2.548 1.042 1.395 0.276 0.000 0.946 0.785 1.350 1.079 0.893 1.267 1.151
|
||||
1 0.902 -0.078 -0.055 0.872 -0.012 0.843 1.276 1.739 2.173 0.838 1.492 0.918 0.000 0.626 0.904 -0.648 2.548 0.412 -2.027 -0.883 0.000 2.838 1.664 0.988 1.803 0.768 1.244 1.280
|
||||
1 0.649 -1.028 -1.521 1.097 0.774 1.216 -0.383 -0.318 2.173 1.643 -0.285 -1.705 0.000 0.911 -0.091 0.341 0.000 0.592 0.537 0.732 3.102 0.911 0.856 1.027 1.160 0.874 0.986 0.893
|
||||
1 1.192 1.846 -0.781 1.326 -0.747 1.550 1.177 1.366 0.000 1.196 0.151 0.387 2.215 0.527 2.261 -0.190 0.000 0.390 1.474 0.381 0.000 0.986 1.025 1.004 1.392 0.761 0.965 1.043
|
||||
0 0.438 -0.358 -1.549 0.836 0.436 0.818 0.276 -0.708 2.173 0.707 0.826 0.392 0.000 1.050 1.741 -1.066 0.000 1.276 -1.583 0.842 0.000 1.475 1.273 0.986 0.853 1.593 1.255 1.226
|
||||
1 1.083 0.142 1.701 0.605 -0.253 1.237 0.791 1.183 2.173 0.842 2.850 -0.082 0.000 0.724 -0.464 -0.694 0.000 1.499 0.456 -0.226 3.102 0.601 0.799 1.102 0.995 1.389 1.013 0.851
|
||||
0 0.828 1.897 -0.615 0.572 -0.545 0.572 0.461 0.464 2.173 0.393 0.356 1.069 2.215 1.840 0.088 1.500 0.000 0.407 -0.663 -0.787 0.000 0.950 0.965 0.979 0.733 0.363 0.618 0.733
|
||||
0 0.735 1.438 1.197 1.123 -0.214 0.641 0.949 0.858 0.000 1.162 0.524 -0.896 2.215 0.992 0.454 -1.475 2.548 0.902 1.079 0.019 0.000 0.822 0.917 1.203 1.032 0.569 0.780 0.764
|
||||
0 0.437 -2.102 0.044 1.779 -1.042 1.231 -0.181 -0.515 1.087 2.666 0.863 1.466 2.215 1.370 0.345 -1.371 0.000 0.906 0.363 1.611 0.000 1.140 1.362 1.013 3.931 3.004 2.724 2.028
|
||||
1 0.881 1.814 -0.987 0.384 0.800 2.384 1.422 0.640 0.000 1.528 0.292 -0.962 1.107 2.126 -0.371 -1.401 2.548 0.700 0.109 0.203 0.000 0.450 0.813 0.985 0.956 1.013 0.993 0.774
|
||||
1 0.630 0.408 0.152 0.194 0.316 0.710 -0.824 -0.358 2.173 0.741 0.535 -0.851 2.215 0.933 0.406 1.148 0.000 0.523 -0.479 -0.625 0.000 0.873 0.960 0.988 0.830 0.921 0.711 0.661
|
||||
1 0.870 -0.448 -1.134 0.616 0.135 0.600 0.649 -0.622 2.173 0.768 0.709 -0.123 0.000 1.308 0.500 1.468 0.000 1.973 -0.286 1.462 3.102 0.909 0.944 0.990 0.835 1.250 0.798 0.776
|
||||
0 1.290 0.552 1.330 0.615 -1.353 0.661 0.240 -0.393 0.000 0.531 0.053 -1.588 0.000 0.675 0.839 -0.345 1.274 1.597 0.020 0.536 3.102 1.114 0.964 0.987 0.783 0.675 0.662 0.675
|
||||
1 0.943 0.936 1.068 1.373 0.671 2.170 -2.011 -1.032 0.000 0.640 0.361 -0.806 0.000 2.239 -0.083 0.590 2.548 1.224 0.646 -1.723 0.000 0.879 0.834 0.981 1.436 0.568 0.916 0.931
|
||||
1 0.431 1.686 -1.053 0.388 1.739 0.457 -0.471 -0.743 2.173 0.786 1.432 -0.547 2.215 0.537 -0.413 1.256 0.000 0.413 2.311 -0.408 0.000 1.355 1.017 0.982 0.689 1.014 0.821 0.715
|
||||
0 1.620 -0.055 -0.862 1.341 -1.571 0.634 -0.906 0.935 2.173 0.501 -2.198 -0.525 0.000 0.778 -0.708 -0.060 0.000 0.988 -0.621 0.489 3.102 0.870 0.956 1.216 0.992 0.336 0.871 0.889
|
||||
1 0.549 0.304 -1.443 1.309 -0.312 1.116 0.644 1.519 2.173 1.078 -0.303 -0.736 0.000 1.261 0.387 0.628 2.548 0.945 -0.190 0.090 0.000 0.893 1.043 1.000 1.124 1.077 1.026 0.886
|
||||
0 0.412 -0.618 -1.486 1.133 -0.665 0.646 0.436 1.520 0.000 0.993 0.976 0.106 2.215 0.832 0.091 0.164 2.548 0.672 -0.650 1.256 0.000 0.695 1.131 0.991 1.017 0.455 1.226 1.087
|
||||
0 1.183 -0.084 1.644 1.389 0.967 0.843 0.938 -0.670 0.000 0.480 0.256 0.123 2.215 0.437 1.644 0.491 0.000 0.501 -0.416 0.101 3.102 1.060 0.804 1.017 0.775 0.173 0.535 0.760
|
||||
0 1.629 -1.486 -0.683 2.786 -0.492 1.347 -2.638 1.453 0.000 1.857 0.208 0.873 0.000 0.519 -1.265 -1.602 1.274 0.903 -1.102 -0.329 1.551 6.892 3.522 0.998 0.570 0.477 2.039 2.006
|
||||
1 2.045 -0.671 -1.235 0.490 -0.952 0.525 -1.252 1.289 0.000 1.088 -0.993 0.648 2.215 0.975 -0.109 -0.254 2.548 0.556 -1.095 -0.194 0.000 0.803 0.861 0.980 1.282 0.945 0.925 0.811
|
||||
0 0.448 -0.058 -0.974 0.945 -1.633 1.181 -1.139 0.266 2.173 1.118 -0.761 1.502 1.107 1.706 0.585 -0.680 0.000 0.487 -1.951 0.945 0.000 2.347 1.754 0.993 1.161 1.549 1.414 1.176
|
||||
0 0.551 0.519 0.448 2.183 1.293 1.220 0.628 -0.627 2.173 1.019 -0.002 -0.652 0.000 1.843 -0.386 1.042 2.548 0.400 -1.102 -1.014 0.000 0.648 0.792 1.049 0.888 2.132 1.262 1.096
|
||||
0 1.624 0.488 1.403 0.760 0.559 0.812 0.777 -1.244 2.173 0.613 0.589 -0.030 2.215 0.692 1.058 0.683 0.000 1.054 1.165 -0.765 0.000 0.915 0.875 1.059 0.821 0.927 0.792 0.721
|
||||
1 0.774 0.444 1.257 0.515 -0.689 0.515 1.448 -1.271 0.000 0.793 0.118 0.811 1.107 0.679 0.326 -0.426 0.000 1.066 -0.865 -0.049 3.102 0.960 1.046 0.986 0.716 0.772 0.855 0.732
|
||||
1 2.093 -1.240 1.615 0.918 -1.202 1.412 -0.541 0.640 1.087 2.019 0.872 -0.639 0.000 0.672 -0.936 0.972 0.000 0.896 0.235 0.212 0.000 0.810 0.700 1.090 0.797 0.862 1.049 0.874
|
||||
1 0.908 1.069 0.283 0.400 1.293 0.609 1.452 -1.136 0.000 0.623 0.417 -0.098 2.215 1.023 0.775 1.054 1.274 0.706 2.346 -1.305 0.000 0.744 1.006 0.991 0.606 0.753 0.796 0.753
|
||||
0 0.403 -1.328 -0.065 0.901 1.052 0.708 -0.354 -0.718 2.173 0.892 0.633 1.684 2.215 0.999 -1.205 0.941 0.000 0.930 1.072 -0.809 0.000 2.105 1.430 0.989 0.838 1.147 1.042 0.883
|
||||
0 1.447 0.453 0.118 1.731 0.650 0.771 0.446 -1.564 0.000 0.973 -2.014 0.354 0.000 1.949 -0.643 -1.531 1.274 1.106 -0.334 -1.163 0.000 0.795 0.821 1.013 1.699 0.918 1.118 1.018
|
||||
1 1.794 0.123 -0.454 0.057 1.489 0.966 -1.190 1.090 1.087 0.539 -0.535 1.035 0.000 1.096 -1.069 -1.236 2.548 0.659 -1.196 -0.283 0.000 0.803 0.756 0.985 1.343 1.109 0.993 0.806
|
||||
0 1.484 -2.047 0.813 0.591 -0.295 0.923 0.312 -1.164 2.173 0.654 -0.316 0.752 2.215 0.599 1.966 -1.128 0.000 0.626 -0.304 -1.431 0.000 1.112 0.910 1.090 0.986 1.189 1.350 1.472
|
||||
0 0.417 -2.016 0.849 1.817 0.040 1.201 -1.676 -1.394 0.000 0.792 0.537 0.641 2.215 0.794 -1.222 0.187 0.000 0.825 -0.217 1.334 3.102 1.470 0.931 0.987 1.203 0.525 0.833 0.827
|
||||
1 0.603 1.009 0.033 0.486 1.225 0.884 -0.617 -1.058 0.000 0.500 -1.407 -0.567 0.000 1.476 -0.876 0.605 2.548 0.970 0.560 1.092 3.102 0.853 1.153 0.988 0.846 0.920 0.944 0.835
|
||||
1 1.381 -0.326 0.552 0.417 -0.027 1.030 -0.835 -1.287 2.173 0.941 -0.421 1.519 2.215 0.615 -1.650 0.377 0.000 0.606 0.644 0.650 0.000 1.146 0.970 0.990 1.191 0.884 0.897 0.826
|
||||
1 0.632 1.200 -0.703 0.438 -1.700 0.779 -0.731 0.958 1.087 0.605 0.393 -1.376 0.000 0.670 -0.827 -1.315 2.548 0.626 -0.501 0.417 0.000 0.904 0.903 0.998 0.673 0.803 0.722 0.640
|
||||
1 1.561 -0.569 1.580 0.329 0.237 1.059 0.731 0.415 2.173 0.454 0.016 -0.828 0.000 0.587 0.008 -0.291 1.274 0.597 1.119 1.191 0.000 0.815 0.908 0.988 0.733 0.690 0.892 0.764
|
||||
1 2.102 0.087 0.449 1.164 -0.390 1.085 -0.408 -1.116 2.173 0.578 0.197 -0.137 0.000 1.202 0.917 1.523 0.000 0.959 -0.832 1.404 3.102 1.380 1.109 1.486 1.496 0.886 1.066 1.025
|
||||
1 1.698 -0.489 -0.552 0.976 -1.009 1.620 -0.721 0.648 1.087 1.481 -1.860 -1.354 0.000 1.142 -1.140 1.401 2.548 1.000 -1.274 -0.158 0.000 1.430 1.130 0.987 1.629 1.154 1.303 1.223
|
||||
1 1.111 -0.249 -1.457 0.421 0.939 0.646 -2.076 0.362 0.000 1.315 0.796 -1.436 2.215 0.780 0.130 0.055 0.000 1.662 -0.834 0.461 0.000 0.920 0.948 0.990 1.046 0.905 1.493 1.169
|
||||
1 0.945 0.390 -1.159 1.675 0.437 0.356 0.261 0.543 1.087 0.574 0.838 1.599 2.215 0.496 -1.220 -0.022 0.000 0.558 -2.454 1.440 0.000 0.763 0.983 1.728 1.000 0.578 0.922 1.003
|
||||
1 2.076 0.014 -1.314 0.854 -0.306 3.446 1.341 0.598 0.000 2.086 0.227 -0.747 2.215 1.564 -0.216 1.649 2.548 0.965 -0.857 -1.062 0.000 0.477 0.734 1.456 1.003 1.660 1.001 0.908
|
||||
1 1.992 0.192 -0.103 0.108 -1.599 0.938 0.595 -1.360 2.173 0.869 -1.012 1.432 0.000 1.302 0.850 0.436 2.548 0.487 1.051 -1.027 0.000 0.502 0.829 0.983 1.110 1.394 0.904 0.836
|
||||
0 0.460 1.625 1.485 1.331 1.242 0.675 -0.329 -1.039 1.087 0.671 -1.028 -0.514 0.000 1.265 -0.788 0.415 1.274 0.570 -0.683 -1.738 0.000 0.725 0.758 1.004 1.024 1.156 0.944 0.833
|
||||
0 0.871 0.839 -1.536 0.428 1.198 0.875 -1.256 -0.466 1.087 0.684 -0.768 0.150 0.000 0.556 -1.793 0.389 0.000 0.942 -1.126 1.339 1.551 0.624 0.734 0.986 1.357 0.960 1.474 1.294
|
||||
1 0.951 1.651 0.576 1.273 1.495 0.834 0.048 -0.578 2.173 0.386 -0.056 -1.448 0.000 0.597 -0.196 0.162 2.548 0.524 1.649 1.625 0.000 0.737 0.901 1.124 1.014 0.556 1.039 0.845
|
||||
1 1.049 -0.223 0.685 0.256 -1.191 2.506 0.238 -0.359 0.000 1.510 -0.904 1.158 1.107 2.733 -0.902 1.679 2.548 0.407 -0.474 -1.572 0.000 1.513 2.472 0.982 1.238 0.978 1.985 1.510
|
||||
0 0.455 -0.028 0.265 1.286 1.373 0.459 0.331 -0.922 0.000 0.343 0.634 0.430 0.000 0.279 -0.084 -0.272 0.000 0.475 0.926 -0.123 3.102 0.803 0.495 0.987 0.587 0.211 0.417 0.445
|
||||
1 2.074 0.388 0.878 1.110 1.557 1.077 -0.226 -0.295 2.173 0.865 -0.319 -1.116 2.215 0.707 -0.835 0.722 0.000 0.632 -0.608 -0.728 0.000 0.715 0.802 1.207 1.190 0.960 1.143 0.926
|
||||
1 1.390 0.265 1.196 0.919 -1.371 1.858 0.506 0.786 0.000 1.280 -1.367 -0.720 2.215 1.483 -0.441 -0.675 2.548 1.076 0.294 -0.539 0.000 1.126 0.830 1.155 1.551 0.702 1.103 0.933
|
||||
1 1.014 -0.079 1.597 1.038 -0.281 1.135 -0.722 -0.177 2.173 0.544 -1.475 -1.501 0.000 1.257 -1.315 1.212 0.000 0.496 -0.060 1.180 1.551 0.815 0.611 1.411 1.110 0.792 0.846 0.853
|
||||
0 0.335 1.267 -1.154 2.011 -0.574 0.753 0.618 1.411 0.000 0.474 0.748 0.681 2.215 0.608 -0.446 -0.354 2.548 0.399 1.295 -0.581 0.000 0.911 0.882 0.975 0.832 0.598 0.580 0.678
|
||||
1 0.729 -0.189 1.182 0.293 1.310 0.412 0.459 -0.632 0.000 0.869 -1.128 -0.625 2.215 1.173 -0.893 0.478 2.548 0.584 -2.394 -1.727 0.000 2.016 1.272 0.995 1.034 0.905 0.966 1.038
|
||||
1 1.225 -1.215 -0.088 0.881 -0.237 0.600 -0.976 1.462 2.173 0.876 0.506 1.583 2.215 0.718 1.228 -0.031 0.000 0.653 -1.292 1.216 0.000 0.838 1.108 0.981 1.805 0.890 1.251 1.197
|
||||
1 2.685 -0.444 0.847 0.253 0.183 0.641 -1.541 -0.873 2.173 0.417 2.874 -0.551 0.000 0.706 -1.431 0.764 0.000 1.390 -0.596 -1.397 0.000 0.894 0.829 0.993 0.789 0.654 0.883 0.746
|
||||
0 0.638 -0.481 0.683 1.457 -1.024 0.707 -1.338 1.498 0.000 0.980 0.518 0.289 2.215 0.964 -0.531 -0.423 0.000 0.694 -0.654 -1.314 3.102 0.807 1.283 1.335 0.658 0.907 0.797 0.772
|
||||
1 1.789 -0.765 -0.732 0.421 -0.020 1.142 -1.353 1.439 2.173 0.725 -1.518 -1.261 0.000 0.812 -2.597 -0.463 0.000 1.203 -0.120 1.001 0.000 0.978 0.673 0.985 1.303 1.400 1.078 0.983
|
||||
1 0.784 -1.431 1.724 0.848 0.559 0.615 -1.643 -1.456 0.000 1.339 -0.513 0.040 2.215 0.394 -2.483 1.304 0.000 0.987 0.889 -0.339 0.000 0.732 0.713 0.987 0.973 0.705 0.875 0.759
|
||||
1 0.911 1.098 -1.289 0.421 0.823 1.218 -0.503 0.431 0.000 0.775 0.432 -1.680 0.000 0.855 -0.226 -0.460 2.548 0.646 -0.947 -1.243 1.551 2.201 1.349 0.985 0.730 0.451 0.877 0.825
|
||||
1 0.959 0.372 -0.269 1.255 0.702 1.151 0.097 0.805 2.173 0.993 1.011 0.767 2.215 1.096 0.185 0.381 0.000 1.001 -0.205 0.059 0.000 0.979 0.997 1.168 0.796 0.771 0.839 0.776
|
||||
0 0.283 -1.864 -1.663 0.219 1.624 0.955 -1.213 0.932 2.173 0.889 0.395 -0.268 0.000 0.597 -1.083 -0.921 2.548 0.584 1.325 -1.072 0.000 0.856 0.927 0.996 0.937 0.936 1.095 0.892
|
||||
0 2.017 -0.488 -0.466 1.029 -0.870 3.157 0.059 -0.343 2.173 3.881 0.872 1.502 1.107 3.631 1.720 0.963 0.000 0.633 -1.264 -1.734 0.000 4.572 3.339 1.005 1.407 5.590 3.614 3.110
|
||||
1 1.088 0.414 -0.841 0.485 0.605 0.860 1.110 -0.568 0.000 1.152 -0.325 1.203 2.215 0.324 1.652 -0.104 0.000 0.510 1.095 -1.728 0.000 0.880 0.722 0.989 0.977 0.711 0.888 0.762
|
||||
0 0.409 -1.717 0.712 0.809 -1.301 0.701 -1.529 -1.411 0.000 1.191 -0.582 0.438 2.215 1.147 0.813 -0.571 2.548 1.039 0.543 0.892 0.000 0.636 0.810 0.986 0.861 1.411 0.907 0.756
|
||||
1 1.094 1.577 -0.988 0.497 -0.149 0.891 -2.459 1.034 0.000 0.646 0.792 -1.022 0.000 1.573 0.254 -0.053 2.548 1.428 0.190 -1.641 3.102 4.322 2.687 0.985 0.881 1.135 1.907 1.831
|
||||
1 0.613 1.993 -0.280 0.544 0.931 0.909 1.526 1.559 0.000 0.840 1.473 -0.483 2.215 0.856 0.352 0.408 2.548 1.058 1.733 -1.396 0.000 0.801 1.066 0.984 0.639 0.841 0.871 0.748
|
||||
0 0.958 -1.202 0.600 0.434 0.170 0.783 -0.214 1.319 0.000 0.835 -0.454 -0.615 2.215 0.658 -1.858 -0.891 0.000 0.640 0.172 -1.204 3.102 1.790 1.086 0.997 0.804 0.403 0.793 0.756
|
||||
1 1.998 -0.238 0.972 0.058 0.266 0.759 1.576 -0.357 2.173 1.004 -0.349 -0.747 2.215 0.962 0.490 -0.453 0.000 1.592 0.661 -1.405 0.000 0.874 1.086 0.990 1.436 1.527 1.177 0.993
|
||||
1 0.796 -0.171 -0.818 0.574 -1.625 1.201 -0.737 1.451 2.173 0.651 0.404 -0.452 0.000 1.150 -0.652 -0.120 0.000 1.008 -0.093 0.531 3.102 0.884 0.706 0.979 1.193 0.937 0.943 0.881
|
||||
1 0.773 1.023 0.527 1.537 -0.201 2.967 -0.574 -1.534 2.173 2.346 -0.307 0.394 2.215 1.393 0.135 -0.027 0.000 3.015 0.187 0.516 0.000 0.819 1.260 0.982 2.552 3.862 2.179 1.786
|
||||
0 1.823 1.008 -1.489 0.234 -0.962 0.591 0.461 0.996 2.173 0.568 -1.297 -0.410 0.000 0.887 2.157 1.194 0.000 2.079 0.369 -0.085 3.102 0.770 0.945 0.995 1.179 0.971 0.925 0.983
|
||||
0 0.780 0.640 0.490 0.680 -1.301 0.715 -0.137 0.152 2.173 0.616 -0.831 1.668 0.000 1.958 0.528 -0.982 2.548 0.966 -1.551 0.462 0.000 1.034 1.079 1.008 0.827 1.369 1.152 0.983
|
||||
1 0.543 0.801 1.543 1.134 -0.772 0.954 -0.849 0.410 1.087 0.851 -1.988 1.686 0.000 0.799 -0.912 -1.156 0.000 0.479 0.097 1.334 0.000 0.923 0.597 0.989 1.231 0.759 0.975 0.867
|
||||
0 1.241 -0.014 0.129 1.158 0.670 0.445 -0.732 1.739 2.173 0.918 0.659 -1.340 2.215 0.557 2.410 -1.404 0.000 0.966 -1.545 -1.120 0.000 0.874 0.918 0.987 1.001 0.798 0.904 0.937
|
||||
0 1.751 -0.266 -1.575 0.489 1.292 1.112 1.533 0.137 2.173 1.204 -0.414 -0.928 0.000 0.879 1.237 -0.415 2.548 1.479 1.469 0.913 0.000 2.884 1.747 0.989 1.742 0.600 1.363 1.293
|
||||
1 1.505 1.208 -1.476 0.995 -0.836 2.800 -1.600 0.111 0.000 2.157 1.241 1.110 2.215 1.076 2.619 -0.913 0.000 1.678 2.204 -1.575 0.000 0.849 1.224 0.990 1.412 0.976 1.271 1.105
|
||||
0 0.816 0.611 0.779 1.694 0.278 0.575 -0.787 1.592 2.173 1.148 1.076 -0.831 2.215 0.421 1.316 0.632 0.000 0.589 0.452 -1.466 0.000 0.779 0.909 0.990 1.146 1.639 1.236 0.949
|
||||
1 0.551 -0.808 0.330 1.188 -0.294 0.447 -0.035 -0.993 0.000 0.432 -0.276 -0.481 2.215 1.959 -0.288 1.195 2.548 0.638 0.583 1.107 0.000 0.832 0.924 0.993 0.723 0.976 0.968 0.895
|
||||
0 1.316 -0.093 0.995 0.860 -0.621 0.593 -0.560 -1.599 2.173 0.524 -0.318 -0.240 2.215 0.566 0.759 -0.368 0.000 0.483 -2.030 -1.104 0.000 1.468 1.041 1.464 0.811 0.778 0.690 0.722
|
||||
1 1.528 0.067 -0.855 0.959 -1.464 1.143 -0.082 1.023 0.000 0.702 -0.763 -0.244 0.000 0.935 -0.881 0.206 2.548 0.614 -0.831 1.657 3.102 1.680 1.105 0.983 1.078 0.559 0.801 0.809
|
||||
0 0.558 -0.833 -0.598 1.436 -1.724 1.316 -0.661 1.593 2.173 1.148 -0.503 -0.132 1.107 1.584 -0.125 0.380 0.000 1.110 -1.216 -0.181 0.000 1.258 0.860 1.053 0.790 1.814 1.159 1.007
|
||||
1 0.819 0.879 1.221 0.598 -1.450 0.754 0.417 -0.369 2.173 0.477 1.199 0.274 0.000 1.073 0.368 0.273 2.548 1.599 2.047 1.690 0.000 0.933 0.984 0.983 0.788 0.613 0.728 0.717
|
||||
0 0.981 -1.007 0.489 0.923 1.261 0.436 -0.698 -0.506 2.173 0.764 -1.105 -1.241 2.215 0.577 -2.573 -0.036 0.000 0.565 -1.628 1.610 0.000 0.688 0.801 0.991 0.871 0.554 0.691 0.656
|
||||
0 2.888 0.568 -1.416 1.461 -1.157 1.756 -0.900 0.522 0.000 0.657 0.409 1.076 2.215 1.419 0.672 -0.019 0.000 1.436 -0.184 -0.980 3.102 0.946 0.919 0.995 1.069 0.890 0.834 0.856
|
||||
1 0.522 1.805 -0.963 1.136 0.418 0.727 -0.195 -1.695 2.173 0.309 2.559 -0.178 0.000 0.521 1.794 0.919 0.000 0.788 0.174 -0.406 3.102 0.555 0.729 1.011 1.385 0.753 0.927 0.832
|
||||
1 0.793 -0.162 -1.643 0.634 0.337 0.898 -0.633 1.689 0.000 0.806 -0.826 -0.356 2.215 0.890 -0.142 -1.268 0.000 1.293 0.574 0.725 0.000 0.833 1.077 0.988 0.721 0.679 0.867 0.753
|
||||
0 1.298 1.098 0.280 0.371 -0.373 0.855 -0.306 -1.186 0.000 0.977 -0.421 1.003 0.000 0.978 0.956 -1.249 2.548 0.735 0.577 -0.037 3.102 0.974 1.002 0.992 0.549 0.587 0.725 0.954
|
||||
1 0.751 -0.520 -1.653 0.168 -0.419 0.878 -1.023 -1.364 2.173 1.310 -0.667 0.863 0.000 1.196 -0.827 0.358 0.000 1.154 -0.165 -0.360 1.551 0.871 0.950 0.983 0.907 0.955 0.959 0.874
|
||||
0 1.730 0.666 -1.432 0.446 1.302 0.921 -0.203 0.621 0.000 1.171 -0.365 -0.611 1.107 0.585 0.807 1.150 0.000 0.415 -0.843 1.311 0.000 0.968 0.786 0.986 1.059 0.371 0.790 0.848
|
||||
1 0.596 -1.486 0.690 1.045 -1.344 0.928 0.867 0.820 2.173 0.610 0.999 -1.329 2.215 0.883 -0.001 -0.106 0.000 1.145 2.184 -0.808 0.000 2.019 1.256 1.056 1.751 1.037 1.298 1.518
|
||||
1 0.656 -1.993 -0.519 1.643 -0.143 0.815 0.256 1.220 1.087 0.399 -1.184 -1.458 0.000 0.738 1.361 -1.443 0.000 0.842 0.033 0.293 0.000 0.910 0.891 0.993 0.668 0.562 0.958 0.787
|
||||
1 1.127 -0.542 0.645 0.318 -1.496 0.661 -0.640 0.369 2.173 0.992 0.358 1.702 0.000 1.004 0.316 -1.109 0.000 1.616 -0.936 -0.707 1.551 0.875 1.191 0.985 0.651 0.940 0.969 0.834
|
||||
0 0.916 -1.423 -1.490 1.248 -0.538 0.625 -0.535 -0.174 0.000 0.769 -0.389 1.608 2.215 0.667 -1.138 -1.738 1.274 0.877 -0.019 0.482 0.000 0.696 0.917 1.121 0.678 0.347 0.647 0.722
|
||||
1 2.756 -0.637 -1.715 1.331 1.124 0.913 -0.296 -0.491 0.000 0.983 -0.831 0.000 2.215 1.180 -0.428 0.742 0.000 1.113 0.005 -1.157 1.551 1.681 1.096 1.462 0.976 0.917 1.009 1.040
|
||||
0 0.755 1.754 0.701 2.111 0.256 1.243 0.057 -1.502 2.173 0.565 -0.034 -1.078 1.107 0.529 1.696 -1.090 0.000 0.665 0.292 0.107 0.000 0.870 0.780 0.990 2.775 0.465 1.876 1.758
|
||||
1 0.593 -0.762 1.743 0.908 0.442 0.773 -1.357 -0.768 2.173 0.432 1.421 1.236 0.000 0.579 0.291 -0.403 0.000 0.966 -0.309 1.016 3.102 0.893 0.743 0.989 0.857 1.030 0.943 0.854
|
||||
1 0.891 -1.151 -1.269 0.504 -0.622 0.893 -0.549 0.700 0.000 0.828 -0.825 0.154 2.215 1.083 0.632 -1.141 0.000 1.059 -0.557 1.526 3.102 2.117 1.281 0.987 0.819 0.802 0.917 0.828
|
||||
1 2.358 -0.248 0.080 0.747 -0.975 1.019 1.374 1.363 0.000 0.935 0.127 -1.707 2.215 0.312 -0.827 0.017 0.000 0.737 1.059 -0.327 0.000 0.716 0.828 1.495 0.953 0.704 0.880 0.745
|
||||
0 0.660 -0.017 -1.138 0.453 1.002 0.645 0.518 0.703 2.173 0.751 0.705 -0.592 2.215 0.744 -0.909 -1.596 0.000 0.410 -1.135 0.481 0.000 0.592 0.922 0.989 0.897 0.948 0.777 0.701
|
||||
1 0.718 0.518 0.225 1.710 -0.022 1.888 -0.424 1.092 0.000 4.134 0.185 -1.366 0.000 1.415 1.293 0.242 2.548 2.351 0.264 -0.057 3.102 0.830 1.630 0.976 1.215 0.890 1.422 1.215
|
||||
1 1.160 0.203 0.941 0.594 0.212 0.636 -0.556 0.679 2.173 1.089 -0.481 -1.008 1.107 1.245 -0.056 -1.357 0.000 0.587 1.007 0.056 0.000 1.106 0.901 0.987 0.786 1.224 0.914 0.837
|
||||
1 0.697 0.542 0.619 0.985 1.481 0.745 0.415 1.644 2.173 0.903 0.495 -0.958 2.215 1.165 1.195 0.346 0.000 1.067 -0.881 -0.264 0.000 0.830 1.025 0.987 0.690 0.863 0.894 0.867
|
||||
0 1.430 0.190 -0.700 0.246 0.518 1.302 0.660 -0.247 2.173 1.185 -0.539 1.504 0.000 1.976 -0.401 1.079 0.000 0.855 -0.958 -1.110 3.102 0.886 0.953 0.993 0.889 1.400 1.376 1.119
|
||||
1 1.122 -0.795 0.202 0.397 -1.553 0.597 -1.459 -0.734 2.173 0.522 1.044 1.027 2.215 0.783 -1.243 1.701 0.000 0.371 1.737 0.199 0.000 1.719 1.176 0.988 0.723 1.583 1.063 0.914
|
||||
0 1.153 0.526 1.236 0.266 0.001 1.139 -1.236 -0.585 2.173 1.337 -0.215 -1.356 2.215 1.780 1.129 0.902 0.000 1.608 -0.391 -0.161 0.000 1.441 1.633 0.990 1.838 1.516 1.635 1.373
|
||||
1 0.760 1.012 0.758 0.937 0.051 0.941 0.687 -1.247 2.173 1.288 -0.743 0.822 0.000 1.552 1.782 -1.533 0.000 0.767 1.349 0.168 0.000 0.716 0.862 0.988 0.595 0.359 0.697 0.623
|
||||
1 1.756 -1.469 1.395 1.345 -1.595 0.817 0.017 -0.741 2.173 0.483 -0.008 0.293 0.000 1.768 -0.663 0.438 1.274 1.202 -1.387 -0.222 0.000 1.022 1.058 0.992 1.407 1.427 1.356 1.133
|
||||
0 0.397 0.582 -0.758 1.260 -1.735 0.889 -0.515 1.139 2.173 0.973 1.616 0.460 0.000 1.308 1.001 -0.709 2.548 0.858 0.995 -0.231 0.000 0.749 0.888 0.979 1.487 1.804 1.208 1.079
|
||||
0 0.515 -0.984 0.425 1.114 -0.439 1.999 0.818 1.561 0.000 1.407 0.009 -0.380 0.000 1.332 0.230 0.397 0.000 1.356 -0.616 -1.057 3.102 0.978 1.017 0.990 1.118 0.862 0.835 0.919
|
||||
1 1.368 -0.921 -0.866 0.842 -0.598 0.456 -1.176 1.219 1.087 0.419 -1.974 -0.819 0.000 0.791 -1.640 0.881 0.000 1.295 -0.782 0.442 3.102 0.945 0.761 0.974 0.915 0.535 0.733 0.651
|
||||
0 2.276 0.134 0.399 2.525 0.376 1.111 -1.078 -1.571 0.000 0.657 2.215 -0.900 0.000 1.183 -0.662 -0.508 2.548 1.436 -0.517 0.960 3.102 0.569 0.931 0.993 1.170 0.967 0.879 1.207
|
||||
0 0.849 0.907 0.124 0.652 1.585 0.715 0.355 -1.200 0.000 0.599 -0.892 1.301 0.000 1.106 1.151 0.582 0.000 1.895 -0.279 -0.568 3.102 0.881 0.945 0.998 0.559 0.649 0.638 0.660
|
||||
1 2.105 0.248 -0.797 0.530 0.206 1.957 -2.175 0.797 0.000 1.193 0.637 -1.646 2.215 0.881 1.111 -1.046 0.000 0.872 -0.185 1.085 1.551 0.986 1.343 1.151 1.069 0.714 2.063 1.951
|
||||
1 1.838 1.060 1.637 1.017 1.370 0.913 0.461 -0.609 1.087 0.766 -0.461 0.303 2.215 0.724 -0.061 0.886 0.000 0.941 1.123 -0.745 0.000 0.858 0.847 0.979 1.313 1.083 1.094 0.910
|
||||
0 0.364 1.274 1.066 1.570 -0.394 0.485 0.012 -1.716 0.000 0.317 -1.233 0.534 2.215 0.548 -2.165 0.762 0.000 0.729 0.169 -0.318 3.102 0.892 0.944 1.013 0.594 0.461 0.688 0.715
|
||||
1 0.503 1.343 -0.031 1.134 -1.204 0.590 -0.309 0.174 2.173 0.408 2.372 -0.628 0.000 1.850 0.400 1.147 2.548 0.664 -0.458 -0.885 0.000 1.445 1.283 0.989 1.280 1.118 1.127 1.026
|
||||
0 1.873 0.258 0.103 2.491 0.530 1.678 0.644 -1.738 2.173 1.432 0.848 -1.340 0.000 0.621 1.323 -1.316 0.000 0.628 0.789 -0.206 1.551 0.426 0.802 1.125 0.688 1.079 1.338 1.239
|
||||
1 0.826 -0.732 1.587 0.582 -1.236 0.495 0.757 -0.741 2.173 0.940 1.474 0.354 2.215 0.474 1.055 -1.657 0.000 0.415 1.758 0.841 0.000 0.451 0.578 0.984 0.757 0.922 0.860 0.696
|
||||
0 0.935 -1.614 -0.597 0.299 1.223 0.707 -0.853 -1.026 0.000 0.751 0.007 -1.691 0.000 1.062 -0.125 0.976 2.548 0.877 1.275 0.646 0.000 0.962 1.074 0.980 0.608 0.726 0.741 0.662
|
||||
1 0.643 0.542 -1.285 0.474 -0.366 0.667 -0.446 1.195 2.173 1.076 0.145 -0.126 0.000 0.970 -0.661 0.394 1.274 1.218 -0.184 -1.722 0.000 1.331 1.019 0.985 1.192 0.677 0.973 0.910
|
||||
0 0.713 0.164 1.080 1.427 -0.460 0.960 -0.152 -0.940 2.173 1.427 -0.901 1.036 1.107 0.440 -1.269 -0.194 0.000 0.452 1.932 -0.532 0.000 1.542 1.210 1.374 1.319 1.818 1.220 1.050
|
||||
0 0.876 -0.463 -1.224 2.458 -1.689 1.007 -0.752 0.398 0.000 2.456 -1.285 -0.152 1.107 1.641 1.838 1.717 0.000 0.458 0.194 0.488 3.102 4.848 2.463 0.986 1.981 0.974 2.642 2.258
|
||||
1 0.384 -0.275 0.387 1.403 -0.994 0.620 -1.529 1.685 0.000 1.091 -1.644 1.078 0.000 0.781 -1.311 0.326 2.548 1.228 -0.728 -0.633 1.551 0.920 0.854 0.987 0.646 0.609 0.740 0.884
|
||||
0 0.318 -1.818 -1.008 0.977 1.268 0.457 2.451 -1.522 0.000 0.881 1.351 0.461 2.215 0.929 0.239 -0.380 2.548 0.382 -0.613 1.330 0.000 1.563 1.193 0.994 0.829 0.874 0.901 1.026
|
||||
1 0.612 -1.120 1.098 0.402 -0.480 0.818 0.188 1.511 0.000 0.800 -0.253 0.977 0.000 1.175 0.271 -1.289 1.274 2.531 0.226 -0.409 3.102 0.889 0.947 0.979 1.486 0.940 1.152 1.119
|
||||
1 0.587 -0.737 -0.228 0.970 1.119 0.823 0.184 1.594 0.000 1.104 0.301 -0.818 2.215 0.819 0.712 -0.560 0.000 2.240 -0.419 0.340 3.102 1.445 1.103 0.988 0.715 1.363 1.019 0.926
|
||||
0 1.030 -0.694 -1.638 0.893 -1.074 1.160 -0.766 0.485 0.000 1.632 -0.698 -1.142 2.215 1.050 -1.092 0.952 0.000 1.475 0.286 0.125 3.102 0.914 1.075 0.982 0.732 1.493 1.219 1.079
|
||||
1 2.142 0.617 1.517 0.387 -0.862 0.345 1.203 -1.014 2.173 0.609 1.092 0.275 0.000 1.331 0.582 -0.183 2.548 0.557 1.540 -1.642 0.000 0.801 0.737 1.060 0.715 0.626 0.749 0.674
|
||||
0 1.076 0.240 -0.246 0.871 -1.241 0.496 0.282 0.746 2.173 1.095 -0.648 1.100 2.215 0.446 -1.756 0.764 0.000 0.434 0.788 -0.991 0.000 1.079 0.868 1.047 0.818 0.634 0.795 0.733
|
||||
0 1.400 0.901 -1.617 0.625 -0.163 0.661 -0.411 -1.616 2.173 0.685 0.524 0.425 0.000 0.881 -0.766 0.312 0.000 0.979 0.255 -0.667 3.102 0.898 1.105 1.253 0.730 0.716 0.738 0.795
|
||||
0 3.302 1.132 1.051 0.658 0.768 1.308 0.251 -0.374 1.087 1.673 0.015 -0.898 0.000 0.688 -0.535 1.363 1.274 0.871 1.325 -1.583 0.000 1.646 1.249 0.995 1.919 1.288 1.330 1.329
|
||||
0 1.757 0.202 0.750 0.767 -0.362 0.932 -1.033 -1.366 0.000 1.529 -1.012 -0.771 0.000 1.161 -0.287 0.059 0.000 2.185 1.147 1.099 3.102 0.795 0.529 1.354 1.144 1.491 1.319 1.161
|
||||
0 1.290 0.905 -1.711 1.017 -0.695 1.008 -1.038 0.693 2.173 1.202 -0.595 0.187 0.000 1.011 0.139 -1.607 0.000 0.789 -0.613 -1.041 3.102 1.304 0.895 1.259 1.866 0.955 1.211 1.200
|
||||
1 1.125 -0.004 1.694 0.373 0.329 0.978 0.640 -0.391 0.000 1.122 -0.376 1.521 2.215 0.432 2.413 -1.259 0.000 0.969 0.730 0.512 3.102 0.716 0.773 0.991 0.624 0.977 0.981 0.875
0 1.081 0.861 1.252 1.621 1.474 1.293 0.600 0.630 0.000 1.991 -0.090 -0.675 2.215 0.861 1.105 -0.201 0.000 1.135 2.489 -1.659 0.000 1.089 0.657 0.991 2.179 0.412 1.334 1.071
1 0.652 -0.294 1.241 1.034 0.490 1.033 0.551 -0.963 2.173 0.661 1.031 -1.654 2.215 1.376 -0.018 0.843 0.000 0.943 -0.329 -0.269 0.000 1.085 1.067 0.991 1.504 0.773 1.135 0.993
1 1.408 -1.028 -1.018 0.252 -0.242 0.465 -0.364 -0.200 0.000 1.466 0.669 0.739 1.107 1.031 0.415 -1.468 2.548 0.457 -1.091 -1.722 0.000 0.771 0.811 0.979 1.459 1.204 1.041 0.866
1 0.781 -1.143 -0.659 0.961 1.266 1.183 -0.686 0.119 2.173 1.126 -0.064 1.447 0.000 0.730 1.430 -1.535 0.000 1.601 0.513 1.658 0.000 0.871 1.345 1.184 1.058 0.620 1.107 0.978
1 1.300 -0.616 1.032 0.751 -0.731 0.961 -0.716 1.592 0.000 2.079 -1.063 -0.271 2.215 0.475 0.518 1.695 1.274 0.395 -2.204 0.349 0.000 1.350 0.983 1.369 1.265 1.428 1.135 0.982
1 0.833 0.809 1.657 1.637 1.019 0.705 1.077 -0.968 2.173 1.261 0.114 -0.298 1.107 1.032 0.017 0.236 0.000 0.640 -0.026 -1.598 0.000 0.894 0.982 0.981 1.250 1.054 1.018 0.853
1 1.686 -1.090 -0.301 0.890 0.557 1.304 -0.284 -1.393 2.173 0.388 2.118 0.513 0.000 0.514 -0.015 0.891 0.000 0.460 0.547 0.627 3.102 0.942 0.524 1.186 1.528 0.889 1.015 1.122
1 0.551 0.911 0.879 0.379 -0.796 1.154 -0.808 -0.966 0.000 1.168 -0.513 0.355 2.215 0.646 -1.309 0.773 0.000 0.544 -0.283 1.301 3.102 0.847 0.705 0.990 0.772 0.546 0.790 0.719
1 1.597 0.793 -1.119 0.691 -1.455 0.370 0.337 1.354 0.000 0.646 -1.005 0.732 2.215 1.019 0.040 0.209 0.000 0.545 0.958 0.239 3.102 0.962 0.793 0.994 0.719 0.745 0.812 0.739
0 1.033 -1.193 -0.452 0.247 0.970 0.503 -1.424 1.362 0.000 1.062 -0.416 -1.156 2.215 0.935 -0.023 0.555 2.548 0.410 -1.766 0.379 0.000 0.590 0.953 0.991 0.717 1.081 0.763 0.690
1 0.859 -1.004 1.521 0.781 -0.993 0.677 0.643 -0.338 2.173 0.486 0.409 1.283 0.000 0.679 0.110 0.285 0.000 0.715 -0.735 -0.157 1.551 0.702 0.773 0.984 0.627 0.633 0.694 0.643
0 0.612 -1.127 1.074 1.225 -0.426 0.927 -2.141 -0.473 0.000 1.290 -0.927 -1.085 2.215 1.183 1.981 -1.687 0.000 2.176 0.406 -1.581 0.000 0.945 0.651 1.170 0.895 1.604 1.179 1.142
1 0.535 0.321 -1.095 0.281 -0.960 0.876 -0.709 -0.076 0.000 1.563 -0.666 1.536 2.215 0.773 -0.321 0.435 0.000 0.682 -0.801 -0.952 3.102 0.711 0.667 0.985 0.888 0.741 0.872 0.758
1 0.745 1.586 1.578 0.863 -1.423 0.530 1.714 1.085 0.000 1.174 0.679 1.015 0.000 1.158 0.609 -1.186 2.548 1.851 0.832 -0.248 3.102 0.910 1.164 0.983 0.947 0.858 0.928 0.823
0 0.677 -1.014 -1.648 1.455 1.461 0.596 -2.358 0.517 0.000 0.800 0.849 -0.743 2.215 1.024 -0.282 -1.004 0.000 1.846 -0.977 0.378 3.102 2.210 1.423 0.982 1.074 1.623 1.417 1.258
1 0.815 -1.263 0.057 1.018 -0.208 0.339 -0.347 -1.646 2.173 1.223 0.600 -1.658 2.215 1.435 0.042 0.926 0.000 0.777 1.698 -0.698 0.000 1.022 1.058 1.000 0.784 0.477 0.886 0.836
0 3.512 -1.094 -0.220 0.338 -0.328 1.962 -1.099 1.544 1.087 1.461 -1.305 -0.922 2.215 1.219 -1.289 0.400 0.000 0.731 0.155 1.249 0.000 1.173 1.366 0.993 2.259 2.000 1.626 1.349
0 0.904 1.248 0.325 0.317 -1.624 0.685 -0.538 1.665 2.173 0.685 -2.145 -1.106 0.000 0.632 -1.460 1.017 0.000 1.085 -0.182 0.162 3.102 0.885 0.801 0.989 0.930 0.904 1.012 0.961
@@ -1,500 +0,0 @@
1 0.644 0.247 -0.447 0.862 0.374 0.854 -1.126 -0.790 2.173 1.015 -0.201 1.400 0.000 1.575 1.807 1.607 0.000 1.585 -0.190 -0.744 3.102 0.958 1.061 0.980 0.875 0.581 0.905 0.796
0 0.385 1.800 1.037 1.044 0.349 1.502 -0.966 1.734 0.000 0.966 -1.960 -0.249 0.000 1.501 0.465 -0.354 2.548 0.834 -0.440 0.638 3.102 0.695 0.909 0.981 0.803 0.813 1.149 1.116
0 1.214 -0.166 0.004 0.505 1.434 0.628 -1.174 -1.230 1.087 0.579 -1.047 -0.118 0.000 0.835 0.340 1.234 2.548 0.711 -1.383 1.355 0.000 0.848 0.911 1.043 0.931 1.058 0.744 0.696
1 0.420 1.111 0.137 1.516 -1.657 0.854 0.623 1.605 1.087 1.511 -1.297 0.251 0.000 0.872 -0.368 -0.721 0.000 0.543 0.731 1.424 3.102 1.597 1.282 1.105 0.730 0.148 1.231 1.234
0 0.897 -1.703 -1.306 1.022 -0.729 0.836 0.859 -0.333 2.173 1.336 -0.965 0.972 2.215 0.671 1.021 -1.439 0.000 0.493 -2.019 -0.289 0.000 0.805 0.930 0.984 1.430 2.198 1.934 1.684
0 0.756 1.126 -0.945 2.355 -0.555 0.889 0.800 1.440 0.000 0.585 0.271 0.631 2.215 0.722 1.744 1.051 0.000 0.618 0.924 0.698 1.551 0.976 0.864 0.988 0.803 0.234 0.822 0.911
0 1.141 -0.741 0.953 1.478 -0.524 1.197 -0.871 1.689 2.173 0.875 1.321 -0.518 1.107 0.540 0.037 -0.987 0.000 0.879 1.187 0.245 0.000 0.888 0.701 1.747 1.358 2.479 1.491 1.223
1 0.606 -0.936 -0.384 1.257 -1.162 2.719 -0.600 0.100 2.173 3.303 -0.284 1.561 1.107 0.689 1.786 -0.326 0.000 0.780 -0.532 1.216 0.000 0.936 2.022 0.985 1.574 4.323 2.263 1.742
1 0.603 0.429 -0.279 1.448 1.301 1.008 2.423 -1.295 0.000 0.452 1.305 0.533 0.000 1.076 1.011 1.256 2.548 2.021 1.260 -0.343 0.000 0.890 0.969 1.281 0.763 0.652 0.827 0.785
0 1.171 -0.962 0.521 0.841 -0.315 1.196 -0.744 -0.882 2.173 0.726 -1.305 1.377 1.107 0.643 -1.790 -1.264 0.000 1.257 0.222 0.817 0.000 0.862 0.911 0.987 0.846 1.293 0.899 0.756
1 1.392 -0.358 0.235 1.494 -0.461 0.895 -0.848 1.549 2.173 0.841 -0.384 0.666 1.107 1.199 2.509 -0.891 0.000 1.109 -0.364 -0.945 0.000 0.693 2.135 1.170 1.362 0.959 2.056 1.842
1 1.024 1.076 -0.886 0.851 1.530 0.673 -0.449 0.187 1.087 0.628 -0.895 1.176 2.215 0.696 -0.232 -0.875 0.000 0.411 1.501 0.048 0.000 0.842 0.919 1.063 1.193 0.777 0.964 0.807
1 0.890 -0.760 1.182 1.369 0.751 0.696 -0.959 -0.710 1.087 0.775 -0.130 -1.409 2.215 0.701 -0.110 -0.739 0.000 0.508 -0.451 0.390 0.000 0.762 0.738 0.998 1.126 0.788 0.940 0.790
1 0.460 0.537 0.636 1.442 -0.269 0.585 0.323 -1.731 2.173 0.503 1.034 -0.927 0.000 0.928 -1.024 1.006 2.548 0.513 -0.618 -1.336 0.000 0.802 0.831 0.992 1.019 0.925 1.056 0.833
1 0.364 1.648 0.560 1.720 0.829 1.110 0.811 -0.588 0.000 0.408 1.045 1.054 2.215 0.319 -1.138 1.545 0.000 0.423 1.025 -1.265 3.102 1.656 0.928 1.003 0.544 0.327 0.670 0.746
1 0.525 -0.096 1.206 0.948 -1.103 1.519 -0.582 0.606 2.173 1.274 -0.572 -0.934 0.000 0.855 -1.028 -1.222 0.000 0.578 -1.000 -1.725 3.102 0.896 0.878 0.981 0.498 0.909 0.772 0.668
0 0.536 -0.821 -1.029 0.703 1.113 0.363 -0.711 0.022 1.087 0.325 1.503 1.249 2.215 0.673 1.041 -0.401 0.000 0.480 2.127 1.681 0.000 0.767 1.034 0.990 0.671 0.836 0.669 0.663
1 1.789 -0.583 1.641 0.897 0.799 0.515 -0.100 -1.483 0.000 1.101 0.031 -0.326 2.215 1.195 0.001 0.126 2.548 0.768 -0.148 0.601 0.000 0.916 0.921 1.207 1.069 0.483 0.934 0.795
1 1.332 -0.571 0.986 0.580 1.508 0.582 0.634 -0.746 1.087 1.084 -0.964 -0.489 0.000 0.785 0.274 0.343 2.548 0.779 0.721 1.489 0.000 1.733 1.145 0.990 1.270 0.715 0.897 0.915
0 1.123 0.629 -1.708 0.597 -0.882 0.752 0.195 1.522 2.173 1.671 1.515 -0.003 0.000 0.778 0.514 0.139 1.274 0.801 1.260 1.600 0.000 1.495 0.976 0.988 0.676 0.921 1.010 0.943
0 1.816 -0.515 0.171 0.980 -0.454 0.870 0.202 -1.399 2.173 1.130 1.066 -1.593 0.000 0.844 0.735 1.275 2.548 1.125 -1.133 0.348 0.000 0.837 0.693 0.988 1.112 0.784 1.009 0.974
1 0.364 0.694 0.445 1.862 0.159 0.963 -1.356 1.260 1.087 0.887 -0.540 -1.533 2.215 0.658 -2.544 -1.236 0.000 0.516 -0.807 0.039 0.000 0.891 1.004 0.991 1.092 0.976 1.000 0.953
1 0.790 -1.175 0.475 1.846 0.094 0.999 -1.090 0.257 0.000 1.422 0.854 1.112 2.215 1.302 1.004 -1.702 1.274 2.557 -0.787 -1.048 0.000 0.890 1.429 0.993 2.807 0.840 2.248 1.821
1 0.765 -0.500 -0.603 1.843 -0.560 1.068 0.007 0.746 2.173 1.154 -0.017 1.329 0.000 1.165 1.791 -1.585 0.000 1.116 0.441 -0.886 0.000 0.774 0.982 0.989 1.102 0.633 1.178 1.021
1 1.407 1.293 -1.418 0.502 -1.527 2.005 -2.122 0.622 0.000 1.699 1.508 -0.649 2.215 1.665 0.748 -0.755 0.000 2.555 0.811 1.423 1.551 7.531 5.520 0.985 1.115 1.881 4.487 3.379
1 0.772 -0.186 -1.372 0.823 -0.140 0.781 0.763 0.046 2.173 1.128 0.516 1.380 0.000 0.797 -0.640 -0.134 2.548 2.019 -0.972 -1.670 0.000 2.022 1.466 0.989 0.856 0.808 1.230 0.991
1 0.546 -0.954 0.715 1.335 -1.689 0.783 -0.443 -1.735 2.173 1.081 0.185 -0.435 0.000 1.433 -0.662 -0.389 0.000 0.969 0.924 1.099 0.000 0.910 0.879 0.988 0.683 0.753 0.878 0.865
1 0.596 0.276 -1.054 1.358 1.355 1.444 1.813 -0.208 0.000 1.175 -0.949 -1.573 0.000 0.855 -1.228 -0.925 2.548 1.837 -0.400 0.913 0.000 0.637 0.901 1.028 0.553 0.790 0.679 0.677
0 0.458 2.292 1.530 0.291 1.283 0.749 -0.930 -0.198 0.000 0.300 -1.560 0.990 0.000 0.811 -0.176 0.995 2.548 1.085 -0.178 -1.213 3.102 0.891 0.648 0.999 0.732 0.655 0.619 0.620
0 0.638 -0.575 -1.048 0.125 0.178 0.846 -0.753 -0.339 1.087 0.799 -0.727 1.182 0.000 0.888 0.283 0.717 0.000 1.051 -1.046 -1.557 3.102 0.889 0.871 0.989 0.884 0.923 0.836 0.779
1 0.434 -1.119 -0.313 2.427 0.461 0.497 0.261 -1.177 2.173 0.618 -0.737 -0.688 0.000 1.150 -1.237 -1.652 2.548 0.757 -0.054 1.700 0.000 0.809 0.741 0.982 1.450 0.936 1.086 0.910
1 0.431 -1.144 -1.030 0.778 -0.655 0.490 0.047 -1.546 0.000 1.583 -0.014 0.891 2.215 0.516 0.956 0.567 2.548 0.935 -1.123 -0.082 0.000 0.707 0.995 0.995 0.700 0.602 0.770 0.685
1 1.894 0.222 1.224 1.578 1.715 0.966 2.890 -0.013 0.000 0.922 -0.703 -0.844 0.000 0.691 2.056 1.039 0.000 0.900 -0.733 -1.240 3.102 1.292 1.992 1.026 0.881 0.684 1.759 1.755
0 0.985 -0.316 0.141 1.067 -0.946 0.819 -1.177 1.307 2.173 1.080 -0.429 0.557 1.107 1.726 1.435 -1.075 0.000 1.100 1.547 -0.647 0.000 0.873 1.696 1.179 1.146 1.015 1.538 1.270
0 0.998 -0.187 -0.236 0.882 0.755 0.468 0.950 -0.439 2.173 0.579 -0.550 -0.624 0.000 1.847 1.196 1.384 1.274 0.846 1.273 -1.072 0.000 1.194 0.797 1.013 1.319 1.174 0.963 0.898
0 0.515 0.246 -0.593 1.082 1.591 0.912 -0.623 -0.957 2.173 0.858 0.418 0.844 0.000 0.948 2.519 1.599 0.000 1.158 1.385 -0.095 3.102 0.973 1.033 0.988 0.998 1.716 1.054 0.901
0 0.919 -1.001 1.506 1.389 0.653 0.507 -0.616 -0.689 2.173 0.808 0.536 -0.467 2.215 0.496 2.187 -0.859 0.000 0.822 0.807 1.163 0.000 0.876 0.861 1.088 0.947 0.614 0.911 1.087
0 0.794 0.051 1.477 1.504 -1.695 0.716 0.315 0.264 1.087 0.879 -0.135 -1.094 2.215 1.433 -0.741 0.201 0.000 1.566 0.534 -0.989 0.000 0.627 0.882 0.974 0.807 1.130 0.929 0.925
1 0.455 -0.946 -1.175 1.453 -0.580 0.763 -0.856 0.840 0.000 0.829 1.223 1.174 2.215 0.714 0.638 -0.466 0.000 1.182 0.223 -1.333 0.000 0.977 0.938 0.986 0.713 0.714 0.796 0.843
1 0.662 -0.296 -1.287 1.212 -0.707 0.641 1.457 0.222 0.000 0.600 0.525 -1.700 2.215 0.784 -0.835 -0.961 2.548 0.865 1.131 1.162 0.000 0.854 0.877 0.978 0.740 0.734 0.888 0.811
0 0.390 0.698 -1.629 1.888 0.298 0.990 1.614 -1.572 0.000 1.666 0.170 0.719 2.215 1.590 1.064 -0.886 1.274 0.952 0.305 -1.216 0.000 1.048 0.897 1.173 0.891 1.936 1.273 1.102
0 1.014 0.117 1.384 0.686 -1.047 0.609 -1.245 -0.850 0.000 1.076 -1.158 0.814 1.107 1.598 -0.389 -0.111 0.000 0.907 1.688 -1.673 0.000 1.333 0.866 0.989 0.975 0.442 0.797 0.788
0 1.530 -1.408 -0.207 0.440 -1.357 0.902 -0.647 1.325 1.087 1.320 -0.819 0.246 1.107 0.503 1.407 -1.683 0.000 1.189 -0.972 -0.925 0.000 0.386 1.273 0.988 0.829 1.335 1.173 1.149
1 1.689 -0.590 0.915 2.076 1.202 0.644 -0.478 -0.238 0.000 0.809 -1.660 -1.184 0.000 1.227 -0.224 -0.808 2.548 1.655 1.047 -0.623 0.000 0.621 1.192 0.988 1.309 0.866 0.924 1.012
0 1.102 0.402 -1.622 1.262 1.022 0.576 0.271 -0.269 0.000 0.591 0.495 -1.278 0.000 1.271 0.209 0.575 2.548 0.941 0.964 -0.685 3.102 0.989 0.963 1.124 0.857 0.858 0.716 0.718
0 2.491 0.825 0.581 1.593 0.205 0.782 -0.815 1.499 0.000 1.179 -0.999 -1.509 0.000 0.926 0.920 -0.522 2.548 2.068 -1.021 -1.050 3.102 0.874 0.943 0.980 0.945 1.525 1.570 1.652
0 0.666 0.254 1.601 1.303 -0.250 1.236 -1.929 0.793 0.000 1.074 0.447 -0.871 0.000 0.991 1.059 -0.342 0.000 1.703 -0.393 -1.419 3.102 0.921 0.945 1.285 0.931 0.462 0.770 0.729
0 0.937 -1.126 1.424 1.395 1.743 0.760 0.428 -0.238 2.173 0.846 0.494 1.320 2.215 0.872 -1.826 -0.507 0.000 0.612 1.860 1.403 0.000 3.402 2.109 0.985 1.298 1.165 1.404 1.240
1 0.881 -1.086 -0.870 0.513 0.266 2.049 -1.870 1.160 0.000 2.259 -0.428 -0.935 2.215 1.321 -0.655 -0.449 2.548 1.350 -1.766 -0.108 0.000 0.911 1.852 0.987 1.167 0.820 1.903 1.443
0 0.410 0.835 -0.819 1.257 1.112 0.871 -1.737 -0.401 0.000 0.927 0.158 1.253 0.000 1.183 0.405 -1.570 0.000 0.807 -0.704 -0.438 3.102 0.932 0.962 0.987 0.653 0.315 0.616 0.648
1 0.634 0.196 -1.679 1.379 -0.967 2.260 -0.273 1.114 0.000 1.458 1.070 -0.278 1.107 1.195 0.110 -0.688 2.548 0.907 0.298 -1.359 0.000 0.949 1.129 0.984 0.675 0.877 0.938 0.824
1 0.632 -1.254 1.201 0.496 -0.106 0.235 2.731 -0.955 0.000 0.615 -0.805 0.600 0.000 0.633 -0.934 1.641 0.000 1.407 -0.483 -0.962 1.551 0.778 0.797 0.989 0.578 0.722 0.576 0.539
0 0.714 1.122 1.566 2.399 -1.431 1.665 0.299 0.323 0.000 1.489 1.087 -0.861 2.215 1.174 0.140 1.083 2.548 0.404 -0.968 1.105 0.000 0.867 0.969 0.981 1.039 1.552 1.157 1.173
1 0.477 -0.321 -0.471 1.966 1.034 2.282 1.359 -0.874 0.000 1.672 -0.258 1.109 0.000 1.537 0.604 0.231 2.548 1.534 -0.640 0.827 0.000 0.746 1.337 1.311 0.653 0.721 0.795 0.742
1 1.351 0.460 0.031 1.194 -1.185 0.670 -1.157 -1.637 2.173 0.599 -0.823 0.680 0.000 0.478 0.373 1.716 0.000 0.809 -0.919 0.010 1.551 0.859 0.839 1.564 0.994 0.777 0.971 0.826
1 0.520 -1.442 -0.348 0.840 1.654 1.273 -0.760 1.317 0.000 0.861 2.579 -0.791 0.000 1.779 0.257 -0.703 0.000 2.154 1.928 0.457 0.000 1.629 3.194 0.992 0.730 1.107 2.447 2.747
0 0.700 -0.308 0.920 0.438 -0.879 0.516 1.409 1.101 0.000 0.960 0.701 -0.049 2.215 1.442 -0.416 -1.439 2.548 0.628 1.009 -0.364 0.000 0.848 0.817 0.987 0.759 1.421 0.937 0.920
1 0.720 1.061 -0.546 0.798 -1.521 1.066 0.173 0.271 1.087 1.453 0.114 1.336 1.107 0.702 0.616 -0.367 0.000 0.543 -0.386 -1.301 0.000 0.653 0.948 0.989 1.031 1.500 0.965 0.790
1 0.735 -0.416 0.588 1.308 -0.382 1.042 0.344 1.609 0.000 0.926 0.163 -0.520 1.107 1.050 -0.427 1.159 2.548 0.834 0.613 0.948 0.000 0.848 1.189 1.042 0.844 1.099 0.829 0.843
1 0.777 -0.396 1.540 1.608 0.638 0.955 0.040 0.918 2.173 1.315 1.116 -0.823 0.000 0.781 -0.762 0.564 2.548 0.945 -0.573 1.379 0.000 0.679 0.706 1.124 0.608 0.593 0.515 0.493
1 0.934 0.319 -0.257 0.970 -0.980 0.726 0.774 0.731 0.000 0.896 0.038 -1.465 1.107 0.773 -0.055 -0.831 2.548 1.439 -0.229 0.698 0.000 0.964 1.031 0.995 0.845 0.480 0.810 0.762
0 0.461 0.771 0.019 2.055 -1.288 1.043 0.147 0.261 2.173 0.833 -0.156 1.425 0.000 0.832 0.805 -0.491 2.548 0.589 1.252 1.414 0.000 0.850 0.906 1.245 1.364 0.850 0.908 0.863
1 0.858 -0.116 -0.937 0.966 1.167 0.825 -0.108 1.111 1.087 0.733 1.163 -0.634 0.000 0.894 0.771 0.020 0.000 0.846 -1.124 -1.195 3.102 0.724 1.194 1.195 0.813 0.969 0.985 0.856
0 0.720 -0.335 -0.307 1.445 0.540 1.108 -0.034 -1.691 1.087 0.883 -1.356 -0.678 2.215 0.440 1.093 0.253 0.000 0.389 -1.582 -1.097 0.000 1.113 1.034 0.988 1.256 1.572 1.062 0.904
1 0.750 -0.811 -0.542 0.985 0.408 0.471 0.477 0.355 0.000 1.347 -0.875 -1.556 2.215 0.564 1.082 -0.724 0.000 0.793 -0.958 -0.020 3.102 0.836 0.825 0.986 1.066 0.924 0.927 0.883
0 0.392 -0.468 -0.216 0.680 1.565 1.086 -0.765 -0.581 1.087 1.264 -1.035 1.189 2.215 0.986 -0.338 0.747 0.000 0.884 -1.328 -0.965 0.000 1.228 0.988 0.982 1.135 1.741 1.108 0.956
1 0.434 -1.269 0.643 0.713 0.608 0.597 0.832 1.627 0.000 0.708 -0.422 0.079 2.215 1.533 -0.823 -1.127 2.548 0.408 -1.357 -0.828 0.000 1.331 1.087 0.999 1.075 1.015 0.875 0.809
0 0.828 -1.803 0.342 0.847 -0.162 1.585 -1.128 -0.272 2.173 1.974 0.039 -1.717 0.000 0.900 0.764 -1.741 0.000 1.349 -0.079 1.035 3.102 0.984 0.815 0.985 0.780 1.661 1.403 1.184
1 1.089 -0.350 -0.747 1.472 0.792 1.087 -0.069 -1.192 0.000 0.512 -0.841 -1.284 0.000 2.162 -0.821 0.545 2.548 1.360 2.243 -0.183 0.000 0.977 0.628 1.725 1.168 0.635 0.823 0.822
1 0.444 0.451 -1.332 1.176 -0.247 0.898 0.194 0.007 0.000 1.958 0.576 -1.618 2.215 0.584 1.203 0.268 0.000 0.939 1.033 1.264 3.102 0.829 0.886 0.985 1.265 0.751 1.032 0.948
0 0.629 0.114 1.177 0.917 -1.204 0.845 0.828 -0.088 0.000 0.962 -1.302 0.823 2.215 0.732 0.358 -1.334 2.548 0.538 0.582 1.561 0.000 1.028 0.834 0.988 0.904 1.205 1.039 0.885
1 1.754 -1.259 -0.573 0.959 -1.483 0.358 0.448 -1.452 0.000 0.711 0.313 0.499 2.215 1.482 -0.390 1.474 2.548 1.879 -1.540 0.668 0.000 0.843 0.825 1.313 1.315 0.939 1.048 0.871
1 0.549 0.706 -1.437 0.894 0.891 0.680 -0.762 -1.568 0.000 0.981 0.499 -0.425 2.215 1.332 0.678 0.485 1.274 0.803 0.022 -0.893 0.000 0.793 1.043 0.987 0.761 0.899 0.915 0.794
0 0.475 0.542 -0.987 1.569 0.069 0.551 1.543 -1.488 0.000 0.608 0.301 1.734 2.215 0.277 0.499 -0.522 0.000 1.375 1.212 0.696 3.102 0.652 0.756 0.987 0.828 0.830 0.715 0.679
1 0.723 0.049 -1.153 1.300 0.083 0.723 -0.749 0.630 0.000 1.126 0.412 -0.384 0.000 1.272 1.256 1.358 2.548 3.108 0.777 -1.486 3.102 0.733 1.096 1.206 1.269 0.899 1.015 0.903
1 1.062 0.296 0.725 0.285 -0.531 0.819 1.277 -0.667 0.000 0.687 0.829 -0.092 0.000 1.158 0.447 1.047 2.548 1.444 -0.186 -1.491 3.102 0.863 1.171 0.986 0.769 0.828 0.919 0.840
0 0.572 -0.349 1.396 2.023 0.795 0.577 0.457 -0.533 0.000 1.351 0.701 -1.091 0.000 0.724 -1.012 -0.182 2.548 0.923 -0.012 0.789 3.102 0.936 1.025 0.985 1.002 0.600 0.828 0.909
1 0.563 0.387 0.412 0.553 1.050 0.723 -0.992 -0.447 0.000 0.748 0.948 0.546 2.215 1.761 -0.559 -1.183 0.000 1.114 -0.251 1.192 3.102 0.936 0.912 0.976 0.578 0.722 0.829 0.892
1 1.632 1.577 -0.697 0.708 -1.263 0.863 0.012 1.197 2.173 0.498 0.990 -0.806 0.000 0.627 2.387 -1.283 0.000 0.607 1.290 -0.174 3.102 0.916 1.328 0.986 0.557 0.971 0.935 0.836
1 0.562 -0.360 0.399 0.803 -1.334 1.443 -0.116 1.628 2.173 0.750 0.987 0.135 1.107 0.795 0.298 -0.556 0.000 1.150 -0.113 -0.093 0.000 0.493 1.332 0.985 1.001 1.750 1.013 0.886
1 0.987 0.706 -0.492 0.861 0.607 0.593 0.088 -0.184 0.000 0.802 0.894 1.608 2.215 0.782 -0.471 1.500 2.548 0.521 0.772 -0.960 0.000 0.658 0.893 1.068 0.877 0.664 0.709 0.661
1 1.052 0.883 -0.581 1.566 0.860 0.931 1.515 -0.873 0.000 0.493 0.145 -0.672 0.000 1.133 0.935 1.581 2.548 1.630 0.695 0.923 3.102 1.105 1.087 1.713 0.948 0.590 0.872 0.883
1 2.130 -0.516 -0.291 0.776 -1.230 0.689 -0.257 0.800 2.173 0.730 -0.274 -1.437 0.000 0.615 0.241 1.083 0.000 0.834 0.757 1.613 3.102 0.836 0.806 1.333 1.061 0.730 0.889 0.783
1 0.742 0.797 1.628 0.311 -0.418 0.620 0.685 -1.457 0.000 0.683 1.774 -1.082 0.000 1.700 1.104 0.225 2.548 0.382 -2.184 -1.307 0.000 0.945 1.228 0.984 0.864 0.931 0.988 0.838
0 0.311 -1.249 -0.927 1.272 -1.262 0.642 -1.228 -0.136 0.000 1.220 -0.804 -1.558 2.215 0.950 -0.828 0.495 1.274 2.149 -1.672 0.634 0.000 1.346 0.887 0.981 0.856 1.101 1.001 1.106
0 0.660 -1.834 -0.667 0.601 1.236 0.932 -0.933 -0.135 2.173 1.373 -0.122 1.429 0.000 0.654 -0.034 -0.847 2.548 0.711 0.911 0.703 0.000 1.144 0.942 0.984 0.822 0.739 0.992 0.895
0 3.609 -0.590 0.851 0.615 0.455 1.280 0.003 -0.866 1.087 1.334 0.708 -1.131 0.000 0.669 0.480 0.092 0.000 0.975 0.983 -1.429 3.102 1.301 1.089 0.987 1.476 0.934 1.469 1.352
1 0.905 -0.403 1.567 2.651 0.953 1.194 -0.241 -0.567 1.087 0.308 -0.384 -0.007 0.000 0.608 -0.175 -1.163 2.548 0.379 0.941 1.662 0.000 0.580 0.721 1.126 0.895 0.544 1.097 0.836
1 0.983 0.255 1.093 0.905 -0.874 0.863 0.060 -0.368 0.000 0.824 -0.747 -0.633 0.000 0.614 0.961 1.052 0.000 0.792 -0.260 1.632 3.102 0.874 0.883 1.280 0.663 0.406 0.592 0.645
1 1.160 -1.027 0.274 0.460 0.322 2.085 -1.623 -0.840 0.000 1.634 -1.046 1.182 2.215 0.492 -0.367 1.174 0.000 0.824 -0.998 1.617 0.000 0.943 0.884 1.001 1.209 1.313 1.034 0.866
0 0.299 0.028 -1.372 1.930 -0.661 0.840 -0.979 0.664 1.087 0.535 -2.041 1.434 0.000 1.087 -1.797 0.344 0.000 0.485 -0.560 -1.105 3.102 0.951 0.890 0.980 0.483 0.684 0.730 0.706
0 0.293 1.737 -1.418 2.074 0.794 0.679 1.024 -1.457 0.000 1.034 1.094 -0.168 1.107 0.506 1.680 -0.661 0.000 0.523 -0.042 -1.274 3.102 0.820 0.944 0.987 0.842 0.694 0.761 0.750
0 0.457 -0.393 1.560 0.738 -0.007 0.475 -0.230 0.246 0.000 0.776 -1.264 -0.606 2.215 0.865 -0.731 -1.576 2.548 1.153 0.343 1.436 0.000 1.060 0.883 0.988 0.972 0.703 0.758 0.720
0 0.935 -0.582 0.240 2.401 0.818 1.231 -0.618 -1.289 0.000 0.799 0.544 -0.228 2.215 0.525 -1.494 -0.969 0.000 0.609 -1.123 1.168 3.102 0.871 0.767 1.035 1.154 0.919 0.868 1.006
1 0.902 -0.745 -1.215 1.174 -0.501 1.215 0.167 1.162 0.000 0.896 1.217 -0.976 0.000 0.585 -0.429 1.036 0.000 1.431 -0.416 0.151 3.102 0.524 0.952 0.990 0.707 0.271 0.592 0.826
1 0.653 0.337 -0.320 1.118 -0.934 1.050 0.745 0.529 1.087 1.075 1.742 -1.538 0.000 0.585 1.090 0.973 0.000 1.091 -0.187 1.160 1.551 1.006 1.108 0.978 1.121 0.838 0.947 0.908
0 1.157 1.401 0.340 0.395 -1.218 0.945 1.928 -0.876 0.000 1.384 0.320 1.002 1.107 1.900 1.177 -0.462 2.548 1.122 1.316 1.720 0.000 1.167 1.096 0.989 0.937 1.879 1.307 1.041
0 0.960 0.355 -0.152 0.872 -0.338 0.391 0.348 0.956 1.087 0.469 2.664 1.409 0.000 0.756 -1.561 1.500 0.000 0.525 1.436 1.728 3.102 1.032 0.946 0.996 0.929 0.470 0.698 0.898
1 1.038 0.274 0.825 1.198 0.963 1.078 -0.496 -1.014 2.173 0.739 -0.727 -0.151 2.215 1.035 -0.799 0.398 0.000 1.333 -0.872 -1.498 0.000 0.849 1.033 0.985 0.886 0.936 0.975 0.823
0 0.490 0.277 0.318 1.303 0.694 1.333 -1.620 -0.563 0.000 1.459 -1.326 1.140 0.000 0.779 -0.673 -1.324 2.548 0.860 -1.247 0.043 0.000 0.857 0.932 0.992 0.792 0.278 0.841 1.498
0 1.648 -0.688 -1.386 2.790 0.995 1.087 1.359 -0.687 0.000 1.050 -0.223 -0.261 2.215 0.613 -0.889 1.335 0.000 1.204 0.827 0.309 3.102 0.464 0.973 2.493 1.737 0.827 1.319 1.062
0 1.510 -0.662 1.668 0.860 0.280 0.705 0.974 -1.647 1.087 0.662 -0.393 -0.225 0.000 0.610 -0.996 0.532 2.548 0.464 1.305 0.102 0.000 0.859 1.057 1.498 0.799 1.260 0.946 0.863
1 0.850 -1.185 -0.117 0.943 -0.449 1.142 0.875 -0.030 0.000 2.223 -0.461 1.627 2.215 0.767 -1.761 -1.692 0.000 1.012 -0.727 0.639 3.102 3.649 2.062 0.985 1.478 1.087 1.659 1.358
0 0.933 1.259 0.130 0.326 -0.890 0.306 1.136 1.142 0.000 0.964 0.705 -1.373 2.215 0.546 -0.196 -0.001 0.000 0.578 -1.169 1.004 3.102 0.830 0.836 0.988 0.837 1.031 0.749 0.655
0 0.471 0.697 1.570 1.109 0.201 1.248 0.348 -1.448 0.000 2.103 0.773 0.686 2.215 1.451 -0.087 -0.453 2.548 1.197 -0.045 -1.026 0.000 0.793 1.094 0.987 0.851 1.804 1.378 1.089
1 2.446 -0.701 -1.568 0.059 0.822 1.401 -0.600 -0.044 2.173 0.324 -0.001 1.344 2.215 0.913 -0.818 1.049 0.000 0.442 -1.088 -0.005 0.000 0.611 1.062 0.979 0.562 0.988 0.998 0.806
0 0.619 2.029 0.933 0.528 -0.903 0.974 0.760 -0.311 2.173 0.825 0.658 -1.466 1.107 0.894 1.594 0.370 0.000 0.882 -0.258 1.661 0.000 1.498 1.088 0.987 0.867 1.139 0.900 0.779
1 0.674 -0.131 -0.362 0.518 -1.574 0.876 0.442 0.145 1.087 0.497 -1.526 -1.704 0.000 0.680 2.514 -1.374 0.000 0.792 -0.479 0.773 1.551 0.573 1.198 0.984 0.800 0.667 0.987 0.832
1 1.447 1.145 -0.937 0.307 -1.458 0.478 1.264 0.816 1.087 0.558 1.015 -0.101 2.215 0.937 -0.190 1.177 0.000 0.699 0.954 -1.512 0.000 0.877 0.838 0.990 0.873 0.566 0.646 0.713
1 0.976 0.308 -0.844 0.436 0.610 1.253 0.149 -1.585 2.173 1.415 0.568 0.096 2.215 0.953 -0.855 0.441 0.000 0.867 -0.650 1.643 0.000 0.890 1.234 0.988 0.796 2.002 1.179 0.977
0 0.697 0.401 -0.718 0.920 0.735 0.958 -0.172 0.168 2.173 0.872 -0.097 -1.335 0.000 0.513 -1.192 -1.710 1.274 0.426 -1.637 1.368 0.000 0.997 1.227 1.072 0.800 1.013 0.786 0.749
1 1.305 -2.157 1.740 0.661 -0.912 0.705 -0.516 0.759 2.173 0.989 -0.716 -0.300 2.215 0.627 -1.052 -1.736 0.000 0.467 -2.467 0.568 0.000 0.807 0.964 0.988 1.427 1.012 1.165 0.926
0 1.847 1.663 -0.618 0.280 1.258 1.462 -0.054 1.371 0.000 0.900 0.309 -0.544 0.000 0.331 -2.149 -0.341 0.000 1.091 -0.833 0.710 3.102 1.496 0.931 0.989 1.549 0.115 1.140 1.150
0 0.410 -0.323 1.069 2.160 0.010 0.892 0.942 -1.640 2.173 0.946 0.938 1.314 0.000 1.213 -1.099 -0.794 2.548 0.650 0.053 0.056 0.000 1.041 0.916 1.063 0.985 1.910 1.246 1.107
1 0.576 1.092 -0.088 0.777 -1.579 0.757 0.271 0.109 0.000 0.819 0.827 -1.554 2.215 1.313 2.341 -1.568 0.000 2.827 0.239 -0.338 0.000 0.876 0.759 0.986 0.692 0.457 0.796 0.791
1 0.537 0.925 -1.406 0.306 -0.050 0.906 1.051 0.037 0.000 1.469 -0.177 -1.320 2.215 1.872 0.723 1.158 0.000 1.313 0.227 -0.501 3.102 0.953 0.727 0.978 0.755 0.892 0.932 0.781
0 0.716 -0.065 -0.484 1.313 -1.563 0.596 -0.242 0.678 2.173 0.426 -1.909 0.616 0.000 0.885 -0.406 -1.343 2.548 0.501 -1.327 -0.340 0.000 0.470 0.728 1.109 0.919 0.881 0.665 0.692
1 0.624 -0.389 0.128 1.636 -1.110 1.025 0.573 -0.843 2.173 0.646 -0.697 1.064 0.000 0.632 -1.442 0.961 0.000 0.863 -0.106 1.717 0.000 0.825 0.917 1.257 0.983 0.713 0.890 0.824
0 0.484 2.101 1.714 1.131 -0.823 0.750 0.583 -1.304 1.087 0.894 0.421 0.559 2.215 0.921 -0.063 0.282 0.000 0.463 -0.474 -1.387 0.000 0.742 0.886 0.995 0.993 1.201 0.806 0.754
0 0.570 0.339 -1.478 0.528 0.439 0.978 1.479 -1.411 2.173 0.763 1.541 -0.734 0.000 1.375 0.840 0.903 0.000 0.965 1.599 0.364 0.000 0.887 1.061 0.992 1.322 1.453 1.013 0.969
0 0.940 1.303 1.636 0.851 -1.732 0.803 -0.030 -0.177 0.000 0.480 -0.125 -0.954 0.000 0.944 0.709 0.296 2.548 1.342 -0.418 1.197 3.102 0.853 0.989 0.979 0.873 0.858 0.719 0.786
1 0.599 0.544 -0.238 0.816 1.043 0.857 0.660 1.128 2.173 0.864 -0.624 -0.843 0.000 1.159 0.367 0.174 0.000 1.520 -0.543 -1.508 0.000 0.842 0.828 0.984 0.759 0.895 0.918 0.791
1 1.651 1.897 -0.914 0.423 0.315 0.453 0.619 -1.607 2.173 0.532 -0.424 0.209 1.107 0.369 2.479 0.034 0.000 0.701 0.217 0.984 0.000 0.976 0.951 1.035 0.879 0.825 0.915 0.798
1 0.926 -0.574 -0.763 0.285 1.094 0.672 2.314 1.545 0.000 1.124 0.415 0.809 0.000 1.387 0.270 -0.949 2.548 1.547 -0.631 -0.200 3.102 0.719 0.920 0.986 0.889 0.933 0.797 0.777
0 0.677 1.698 -0.890 0.641 -0.449 0.607 1.754 1.720 0.000 0.776 0.372 0.782 2.215 0.511 1.491 -0.480 0.000 0.547 -0.341 0.853 3.102 0.919 1.026 0.997 0.696 0.242 0.694 0.687
0 1.266 0.602 0.958 0.487 1.256 0.709 0.843 -1.196 0.000 0.893 1.303 -0.594 1.107 1.090 1.320 0.354 0.000 0.797 1.846 1.139 0.000 0.780 0.896 0.986 0.661 0.709 0.790 0.806
1 0.628 -0.616 -0.329 0.764 -1.150 0.477 -0.715 1.187 2.173 1.250 0.607 1.026 2.215 0.983 -0.023 -0.583 0.000 0.377 1.344 -1.015 0.000 0.744 0.954 0.987 0.837 0.841 0.795 0.694
1 1.035 -0.828 -1.358 1.870 -1.060 1.075 0.130 0.448 2.173 0.660 0.697 0.641 0.000 0.425 1.006 -1.035 0.000 0.751 1.055 1.364 3.102 0.826 0.822 0.988 0.967 0.901 1.077 0.906
1 0.830 0.265 -0.150 0.660 1.105 0.592 -0.557 0.908 2.173 0.670 -1.419 -0.671 0.000 1.323 -0.409 1.644 2.548 0.850 -0.033 -0.615 0.000 0.760 0.967 0.984 0.895 0.681 0.747 0.770
1 1.395 1.100 1.167 1.088 0.218 0.400 -0.132 0.024 2.173 0.743 0.530 -1.361 2.215 0.341 -0.691 -0.238 0.000 0.396 -1.426 -0.933 0.000 0.363 0.472 1.287 0.922 0.810 0.792 0.656
1 1.070 1.875 -1.298 1.215 -0.106 0.767 0.795 0.514 1.087 0.401 2.780 1.276 0.000 0.686 1.127 1.721 2.548 0.391 -0.259 -1.167 0.000 1.278 1.113 1.389 0.852 0.824 0.838 0.785
0 1.114 -0.071 1.719 0.399 -1.383 0.849 0.254 0.481 0.000 0.958 -0.579 0.742 0.000 1.190 -0.140 -0.862 2.548 0.479 1.390 0.856 0.000 0.952 0.988 0.985 0.764 0.419 0.835 0.827
0 0.714 0.376 -0.568 1.578 -1.165 0.648 0.141 0.639 2.173 0.472 0.569 1.449 1.107 0.783 1.483 0.361 0.000 0.540 -0.790 0.032 0.000 0.883 0.811 0.982 0.775 0.572 0.760 0.745
0 0.401 -1.731 0.765 0.974 1.648 0.652 -1.024 0.191 0.000 0.544 -0.366 -1.246 2.215 0.627 0.140 1.008 2.548 0.810 0.409 0.429 0.000 0.950 0.934 0.977 0.621 0.580 0.677 0.650
1 0.391 1.679 -1.298 0.605 -0.832 0.549 1.338 0.522 2.173 1.244 0.884 1.070 0.000 1.002 0.846 -1.345 2.548 0.783 -2.464 -0.237 0.000 4.515 2.854 0.981 0.877 0.939 1.942 1.489
1 0.513 -0.220 -0.444 1.699 0.479 1.109 0.181 -0.999 2.173 0.883 -0.335 -1.716 2.215 1.075 -0.380 1.352 0.000 0.857 0.048 0.147 0.000 0.937 0.758 0.986 1.206 0.958 0.949 0.876
0 1.367 -0.388 0.798 1.158 1.078 0.811 -1.024 -1.628 0.000 1.504 0.097 -0.999 2.215 1.652 -0.860 0.054 2.548 0.573 -0.142 -1.401 0.000 0.869 0.833 1.006 1.412 1.641 1.214 1.041
1 1.545 -0.533 -1.517 1.177 1.289 2.331 -0.370 -0.073 0.000 1.295 -0.358 -0.891 2.215 0.476 0.756 0.985 0.000 1.945 -0.016 -1.651 3.102 1.962 1.692 1.073 0.656 0.941 1.312 1.242
0 0.858 0.978 -1.258 0.286 0.161 0.729 1.230 1.087 2.173 0.561 2.670 -0.109 0.000 0.407 2.346 0.938 0.000 1.078 0.729 -0.658 3.102 0.597 0.921 0.982 0.579 0.954 0.733 0.769
1 1.454 -1.384 0.870 0.067 0.394 1.033 -0.673 0.318 0.000 1.166 -0.763 -1.533 2.215 2.848 -0.045 -0.856 2.548 0.697 -0.140 1.134 0.000 0.931 1.293 0.977 1.541 1.326 1.201 1.078
1 0.559 -0.913 0.486 1.104 -0.321 1.073 -0.348 1.345 0.000 0.901 -0.827 -0.842 0.000 0.739 0.047 -0.415 2.548 0.433 -1.132 1.268 0.000 0.797 0.695 0.985 0.868 0.346 0.674 0.623
1 1.333 0.780 -0.964 0.916 1.202 1.822 -0.071 0.742 2.173 1.486 -0.399 -0.824 0.000 0.740 0.568 -0.134 0.000 0.971 -0.070 -1.589 3.102 1.278 0.929 1.421 1.608 1.214 1.215 1.137
1 2.417 0.631 -0.317 0.323 0.581 0.841 1.524 -1.738 0.000 0.543 1.176 -0.325 0.000 0.827 0.700 0.866 0.000 0.834 -0.262 -1.702 3.102 0.932 0.820 0.988 0.646 0.287 0.595 0.589
0 0.955 -1.242 0.938 1.104 0.474 0.798 -0.743 1.535 0.000 1.356 -1.357 -1.080 2.215 1.320 -1.396 -0.132 2.548 0.728 -0.529 -0.633 0.000 0.832 0.841 0.988 0.923 1.077 0.988 0.816
1 1.305 -1.918 0.391 1.161 0.063 0.724 2.593 1.481 0.000 0.592 -1.207 -0.329 0.000 0.886 -0.836 -1.168 2.548 1.067 -1.481 -1.440 0.000 0.916 0.688 0.991 0.969 0.550 0.665 0.638
0 1.201 0.071 -1.123 2.242 -1.533 0.702 -0.256 0.688 0.000 0.967 0.491 1.040 2.215 1.271 -0.558 0.095 0.000 1.504 0.676 -0.383 3.102 0.917 1.006 0.985 1.017 1.057 0.928 1.057
0 0.994 -1.607 1.596 0.774 -1.391 0.625 -0.134 -0.862 2.173 0.746 -0.765 -0.316 2.215 1.131 -0.320 0.869 0.000 0.607 0.826 0.301 0.000 0.798 0.967 0.999 0.880 0.581 0.712 0.774
1 0.482 -0.467 0.729 1.419 1.458 0.824 0.376 -0.242 0.000 1.368 0.023 1.459 2.215 0.826 0.669 -1.079 2.548 0.936 2.215 -0.309 0.000 1.883 1.216 0.997 1.065 0.946 1.224 1.526
1 0.383 1.588 1.611 0.748 1.194 0.866 -0.279 -0.636 0.000 0.707 0.536 0.801 2.215 1.647 -1.155 0.367 0.000 1.292 0.303 -1.681 3.102 2.016 1.581 0.986 0.584 0.684 1.107 0.958
0 0.629 0.203 0.736 0.671 -0.271 1.350 -0.486 0.761 2.173 0.496 -0.805 -1.718 0.000 2.393 0.044 -1.046 1.274 0.651 -0.116 -0.541 0.000 0.697 1.006 0.987 1.069 2.317 1.152 0.902
0 0.905 -0.564 -0.570 0.263 1.096 1.219 -1.397 -1.414 1.087 1.164 -0.533 -0.208 0.000 1.459 1.965 0.784 0.000 2.220 -1.421 0.452 0.000 0.918 1.360 0.993 0.904 0.389 2.118 1.707
1 1.676 1.804 1.171 0.529 1.175 1.664 0.354 -0.530 0.000 1.004 0.691 -1.280 2.215 0.838 0.373 0.626 2.548 1.094 1.774 0.501 0.000 0.806 1.100 0.991 0.769 0.976 0.807 0.740
1 1.364 -1.936 0.020 1.327 0.428 1.021 -1.665 -0.907 2.173 0.818 -2.701 1.303 0.000 0.716 -0.590 -1.629 2.548 0.895 -2.280 -1.602 0.000 1.211 0.849 0.989 1.320 0.864 1.065 0.949
0 0.629 -0.626 0.609 1.828 1.280 0.644 -0.856 -0.873 2.173 0.555 1.066 -0.640 0.000 0.477 -1.364 -1.021 2.548 1.017 0.036 0.380 0.000 0.947 0.941 0.994 1.128 0.241 0.793 0.815
1 1.152 -0.843 0.926 1.802 0.800 2.493 -1.449 -1.127 0.000 1.737 0.833 0.488 0.000 1.026 0.929 -0.990 2.548 1.408 0.689 1.142 3.102 1.171 0.956 0.993 2.009 0.867 1.499 1.474
0 2.204 0.081 0.008 1.021 -0.679 2.676 0.090 1.163 0.000 2.210 -1.686 -1.195 0.000 1.805 0.891 -0.148 2.548 0.450 -0.502 -1.295 3.102 6.959 3.492 1.205 0.908 0.845 2.690 2.183
1 0.957 0.954 1.702 0.043 -0.503 1.113 0.033 -0.308 0.000 0.757 -0.363 -1.129 2.215 1.635 0.068 1.048 1.274 0.415 -2.098 0.061 0.000 1.010 0.979 0.992 0.704 1.125 0.761 0.715
0 1.222 0.418 1.059 1.303 1.442 0.282 -1.499 -1.286 0.000 1.567 0.016 -0.164 2.215 0.451 2.229 -1.229 0.000 0.660 -0.513 -0.296 3.102 2.284 1.340 0.985 1.531 0.314 1.032 1.094
1 0.603 1.675 -0.973 0.703 -1.709 1.023 0.652 1.296 2.173 1.078 0.363 -0.263 0.000 0.734 -0.457 -0.745 1.274 0.561 1.434 -0.042 0.000 0.888 0.771 0.984 0.847 1.234 0.874 0.777
0 0.897 0.949 -0.848 1.115 -0.085 0.522 -1.267 -1.418 0.000 0.684 -0.599 1.474 0.000 1.176 0.922 0.641 2.548 0.470 0.103 0.148 3.102 0.775 0.697 0.984 0.839 0.358 0.847 1.008
1 0.987 1.013 -1.504 0.468 -0.259 1.160 0.476 -0.971 2.173 1.266 0.919 0.780 0.000 0.634 1.695 0.233 0.000 0.487 -0.082 0.719 3.102 0.921 0.641 0.991 0.730 0.828 0.952 0.807
1 0.847 1.581 -1.397 1.629 1.529 1.053 0.816 -0.344 2.173 0.895 0.779 0.332 0.000 0.750 1.311 0.419 2.548 1.604 0.844 1.367 0.000 1.265 0.798 0.989 1.328 0.783 0.930 0.879
1 0.805 1.416 -1.327 0.397 0.589 0.488 0.982 0.843 0.000 0.664 -0.999 0.129 0.000 0.624 0.613 -0.558 0.000 1.431 -0.667 -1.561 3.102 0.959 1.103 0.989 0.590 0.632 0.926 0.798
0 1.220 -0.313 -0.489 1.759 0.201 1.698 -0.220 0.241 2.173 1.294 1.390 -1.682 0.000 1.447 -1.623 -1.296 0.000 1.710 0.872 -1.356 3.102 1.198 0.981 1.184 0.859 2.165 1.807 1.661
0 0.772 -0.611 -0.549 0.465 -1.528 1.103 -0.140 0.001 2.173 0.854 -0.406 1.655 0.000 0.733 -1.250 1.072 0.000 0.883 0.627 -1.132 3.102 0.856 0.927 0.987 1.094 1.013 0.938 0.870
1 1.910 0.771 0.828 0.231 1.267 1.398 1.455 -0.295 2.173 0.837 -2.564 0.770 0.000 0.540 2.189 1.287 0.000 1.345 1.311 -1.151 0.000 0.861 0.869 0.984 1.359 1.562 1.105 0.963
1 0.295 0.832 1.399 1.222 -0.517 2.480 0.013 1.591 0.000 2.289 0.436 0.287 2.215 1.995 -0.367 -0.409 1.274 0.375 1.367 -1.716 0.000 1.356 2.171 0.990 1.467 1.664 1.855 1.705
1 1.228 0.339 -0.575 0.417 1.474 0.480 -1.416 -1.498 2.173 0.614 -0.933 -0.961 0.000 1.189 1.690 1.003 0.000 1.690 -1.065 0.106 3.102 0.963 1.147 0.987 1.086 0.948 0.930 0.866
0 2.877 -1.014 1.440 0.782 0.483 1.134 -0.735 -0.196 2.173 1.123 0.084 -0.596 0.000 1.796 -0.356 1.044 2.548 1.406 1.582 -0.991 0.000 0.939 1.178 1.576 0.996 1.629 1.216 1.280
1 2.178 0.259 1.107 0.256 1.222 0.979 -0.440 -0.538 1.087 0.496 -0.760 -0.049 0.000 1.471 1.683 -1.486 0.000 0.646 0.695 -1.577 3.102 1.093 1.070 0.984 0.608 0.889 0.962 0.866
1 0.604 0.592 1.295 0.964 0.348 1.178 -0.016 0.832 2.173 1.626 -0.420 -0.760 0.000 0.748 0.461 -0.906 0.000 0.728 0.309 -1.269 1.551 0.852 0.604 0.989 0.678 0.949 1.021 0.878
0 0.428 -1.352 -0.912 1.713 0.797 1.894 -1.452 0.191 2.173 2.378 2.113 -1.190 0.000 0.860 2.174 0.949 0.000 1.693 0.759 1.426 3.102 0.885 1.527 1.186 1.090 3.294 4.492 3.676
0 0.473 0.485 0.154 1.433 -1.504 0.766 1.257 -1.302 2.173 0.414 0.119 0.238 0.000 0.805 0.242 -0.691 2.548 0.734 0.749 0.753 0.000 0.430 0.893 1.137 0.686 0.724 0.618 0.608
1 0.763 -0.601 0.876 0.182 -1.678 0.818 0.599 0.481 2.173 0.658 -0.737 -0.553 0.000 0.857 -1.138 -1.435 0.000 1.540 -1.466 -0.447 0.000 0.870 0.566 0.989 0.728 0.658 0.821 0.726
0 0.619 -0.273 -0.143 0.992 -1.267 0.566 0.876 -1.396 2.173 0.515 0.892 0.618 0.000 0.434 -0.902 0.862 2.548 0.490 -0.539 0.549 0.000 0.568 0.794 0.984 0.667 0.867 0.597 0.578
0 0.793 0.970 0.324 0.570 0.816 0.761 -0.550 1.519 2.173 1.150 0.496 -0.447 0.000 0.925 0.724 1.008 1.274 1.135 -0.275 -0.843 0.000 0.829 1.068 0.978 1.603 0.892 1.041 1.059
1 0.480 0.364 -0.067 1.906 -1.582 1.397 1.159 0.140 0.000 0.639 0.398 -1.102 0.000 1.597 -0.668 1.607 2.548 1.306 -0.797 0.288 3.102 0.856 1.259 1.297 1.022 1.032 1.049 0.939
0 0.514 1.304 1.490 1.741 -0.220 0.648 0.155 0.535 0.000 0.562 -1.016 0.837 0.000 0.863 -0.780 -0.815 2.548 1.688 -0.130 -1.545 3.102 0.887 0.980 1.309 1.269 0.654 1.044 1.035
0 1.225 0.333 0.656 0.893 0.859 1.037 -0.876 1.603 1.087 1.769 0.272 -0.227 2.215 1.000 0.579 -1.690 0.000 1.385 0.471 -0.860 0.000 0.884 1.207 0.995 1.097 2.336 1.282 1.145
0 2.044 -1.472 -0.294 0.392 0.369 0.927 0.718 1.492 1.087 1.619 -0.736 0.047 2.215 1.884 -0.101 -1.540 0.000 0.548 -0.441 1.117 0.000 0.798 0.877 0.981 0.750 2.272 1.469 1.276
0 1.037 -0.276 0.735 3.526 1.156 2.498 0.401 -0.590 1.087 0.714 -1.203 1.393 2.215 0.681 0.629 1.534 0.000 0.719 -0.355 -0.706 0.000 0.831 0.857 0.988 2.864 2.633 1.988 1.466
1 0.651 -1.218 -0.791 0.770 -1.449 0.610 -0.535 0.960 2.173 0.380 -1.072 -0.031 2.215 0.415 2.123 -1.100 0.000 0.776 0.217 0.420 0.000 0.986 1.008 1.001 0.853 0.588 0.799 0.776
0 1.586 -0.409 0.085 3.258 0.405 1.647 -0.674 -1.519 0.000 0.640 -1.027 -1.681 0.000 1.452 -0.444 -0.957 2.548 0.927 -0.017 1.215 3.102 0.519 0.866 0.992 0.881 0.847 1.018 1.278
0 0.712 0.092 -0.466 0.688 1.236 0.921 -1.217 -1.022 2.173 2.236 -1.167 0.868 2.215 0.851 -1.892 -0.753 0.000 0.475 -1.216 -0.383 0.000 0.668 0.758 0.988 1.180 2.093 1.157 0.934
0 0.419 0.471 0.974 2.805 0.235 1.473 -0.198 1.255 1.087 0.931 1.083 -0.712 0.000 1.569 1.358 -1.179 2.548 2.506 0.199 -0.842 0.000 0.929 0.991 0.992 1.732 2.367 1.549 1.430
1 0.667 1.003 1.504 0.368 1.061 0.885 -0.318 -0.353 0.000 1.438 -1.939 0.710 0.000 1.851 0.277 -1.460 2.548 1.403 0.517 -0.157 0.000 0.883 1.019 1.000 0.790 0.859 0.938 0.841
1 1.877 -0.492 0.372 0.441 0.955 1.034 -1.220 -0.846 1.087 0.952 -0.320 1.125 0.000 0.542 0.308 -1.261 2.548 1.018 -1.415 -1.547 0.000 1.280 0.932 0.991 1.273 0.878 0.921 0.906
0 1.052 0.901 1.176 1.280 1.517 0.562 -1.150 -0.079 2.173 1.228 -0.308 -0.354 0.000 0.790 -1.492 -0.963 0.000 0.942 -0.672 -1.588 3.102 1.116 0.902 0.988 1.993 0.765 1.375 1.325
1 0.518 -0.254 1.642 0.865 0.725 0.980 0.734 0.023 0.000 1.448 0.780 -1.736 2.215 0.955 0.513 -0.519 0.000 0.365 -0.444 -0.243 3.102 0.833 0.555 0.984 0.827 0.795 0.890 0.786
0 0.870 0.815 -0.506 0.663 -0.518 0.935 0.289 -1.675 2.173 1.188 0.005 0.635 0.000 0.580 0.066 -1.455 2.548 0.580 -0.634 -0.199 0.000 0.852 0.788 0.979 1.283 0.208 0.856 0.950
0 0.628 1.382 0.135 0.683 0.571 1.097 0.564 -0.950 2.173 0.617 -0.326 0.371 0.000 1.093 0.918 1.667 2.548 0.460 1.221 0.708 0.000 0.743 0.861 0.975 1.067 1.007 0.843 0.762
0 4.357 0.816 -1.609 1.845 -1.288 3.292 0.726 0.324 2.173 1.528 0.583 -0.801 2.215 0.605 0.572 1.406 0.000 0.794 -0.791 0.122 0.000 0.967 1.132 1.124 3.602 2.811 2.460 1.861
0 0.677 -1.265 1.559 0.866 -0.618 0.823 0.260 0.185 0.000 1.133 0.337 1.589 2.215 0.563 -0.830 0.510 0.000 0.777 0.117 -0.941 3.102 0.839 0.763 0.986 1.182 0.649 0.796 0.851
0 2.466 -1.838 -1.648 1.717 1.533 1.676 -1.553 -0.109 2.173 0.670 -0.666 0.284 0.000 0.334 -2.480 0.316 0.000 0.366 -0.804 -1.298 3.102 0.875 0.894 0.997 0.548 0.770 1.302 1.079
1 1.403 0.129 -1.307 0.688 0.306 0.579 0.753 0.814 1.087 0.474 0.694 -1.400 0.000 0.520 1.995 0.185 0.000 0.929 -0.504 1.270 3.102 0.972 0.998 1.353 0.948 0.650 0.688 0.724
1 0.351 1.188 -0.360 0.254 -0.346 1.129 0.545 1.691 0.000 0.652 -0.039 -0.258 2.215 1.089 0.655 0.472 2.548 0.554 -0.493 1.366 0.000 0.808 1.045 0.992 0.570 0.649 0.809 0.744
0 1.875 -0.013 -0.128 0.236 1.163 0.902 0.426 0.590 2.173 1.251 -1.210 -0.616 0.000 1.035 1.534 0.912 0.000 1.944 1.789 -1.691 0.000 0.974 1.113 0.990 0.925 1.120 0.956 0.912
0 0.298 0.750 -0.507 1.555 1.463 0.804 1.200 -0.665 0.000 0.439 -0.829 -0.252 1.107 0.770 -1.090 0.947 2.548 1.165 -0.166 -0.763 0.000 1.140 0.997 0.988 1.330 0.555 1.005 1.012
0 0.647 0.342 0.245 4.340 -0.157 2.229 0.068 1.170 2.173 2.133 -0.201 -1.441 0.000 1.467 0.697 -0.532 1.274 1.457 0.583 -1.640 0.000 0.875 1.417 0.976 2.512 2.390 1.794 1.665
1 1.731 -0.803 -1.013 1.492 -0.020 1.646 -0.541 1.121 2.173 0.459 -1.251 -1.495 2.215 0.605 -1.711 -0.232 0.000 0.658 0.634 -0.068 0.000 1.214 0.886 1.738 1.833 1.024 1.192 1.034
0 0.515 1.416 -1.089 1.697 1.426 1.414 0.941 0.027 0.000 1.480 0.133 -1.595 2.215 1.110 0.752 0.760 2.548 1.062 0.697 -0.492 0.000 0.851 0.955 0.994 1.105 1.255 1.175 1.095
0 1.261 0.858 1.465 0.757 0.305 2.310 0.679 1.080 2.173 1.544 2.518 -0.464 0.000 2.326 0.270 -0.841 0.000 2.163 0.839 -0.500 3.102 0.715 0.825 1.170 0.980 2.371 1.527 1.221
1 1.445 1.509 1.471 0.414 -1.285 0.767 0.864 -0.677 2.173 0.524 1.388 0.171 0.000 0.826 0.190 0.121 2.548 0.572 1.691 -1.603 0.000 0.870 0.935 0.994 0.968 0.735 0.783 0.777
1 0.919 -0.264 -1.245 0.681 -1.722 1.022 1.010 0.097 2.173 0.685 0.403 -1.351 0.000 1.357 -0.429 1.262 1.274 0.687 1.021 -0.563 0.000 0.953 0.796 0.991 0.873 1.749 1.056 0.917
1 0.293 -2.258 -1.427 1.191 1.202 0.394 -2.030 1.438 0.000 0.723 0.596 -0.024 2.215 0.525 -1.678 -0.290 0.000 0.788 -0.824 -1.029 3.102 0.821 0.626 0.976 1.080 0.810 0.842 0.771
0 3.286 0.386 1.688 1.619 -1.620 1.392 -0.009 0.280 0.000 1.179 -0.776 -0.110 2.215 1.256 0.248 -1.114 2.548 0.777 0.825 -0.156 0.000 1.026 1.065 0.964 0.909 1.249 1.384 1.395
1 1.075 0.603 0.561 0.656 -0.685 0.985 0.175 0.979 2.173 1.154 0.584 -0.886 0.000 1.084 -0.354 -1.004 2.548 0.865 1.224 1.269 0.000 1.346 1.073 1.048 0.873 1.310 1.003 0.865
1 1.098 -0.091 1.466 1.558 0.915 0.649 1.314 -1.182 2.173 0.791 0.073 0.351 0.000 0.517 0.940 1.195 0.000 1.150 1.187 -0.692 3.102 0.866 0.822 0.980 1.311 0.394 1.119 0.890
1 0.481 -1.042 0.148 1.135 -1.249 1.202 -0.344 0.308 1.087 0.779 -1.431 1.581 0.000 0.860 -0.860 -1.125 0.000 0.785 0.303 1.199 3.102 0.878 0.853 0.988 1.072 0.827 0.936 0.815
0 1.348 0.497 0.318 0.806 0.976 1.393 -0.152 0.632 2.173 2.130 0.515 -1.054 0.000 0.908 0.062 -0.780 0.000 1.185 0.687 1.668 1.551 0.720 0.898 0.985 0.683 1.292 1.320 1.131
0 2.677 -0.420 -1.685 1.828 1.433 2.040 -0.718 -0.039 0.000 0.400 -0.873 0.472 0.000 0.444 0.340 -0.830 2.548 0.431 0.768 -1.417 3.102 0.869 0.917 0.996 0.707 0.193 0.728 1.154
1 1.300 0.586 -0.122 1.306 0.609 0.727 -0.556 -1.652 2.173 0.636 0.720 1.393 2.215 0.328 1.280 -0.390 0.000 0.386 0.752 -0.905 0.000 0.202 0.751 1.106 0.864 0.799 0.928 0.717
0 0.637 -0.176 1.737 1.322 -0.414 0.702 -0.964 -0.680 0.000 1.054 -0.461 0.889 2.215 0.861 -0.267 0.225 0.000 1.910 -1.888 1.027 0.000 0.919 0.899 1.186 0.993 1.109 0.862 0.775
1 0.723 -0.104 1.572 0.428 -0.840 0.655 0.544 1.401 2.173 1.522 -0.154 -0.452 2.215 0.996 0.190 0.273 0.000 1.906 -0.176 0.966 0.000 0.945 0.894 0.990 0.981 1.555 0.988 0.893
0 2.016 -0.570 1.612 0.798 0.441 0.334 0.191 -0.909 0.000 0.939 0.146 0.021 2.215 0.553 -0.444 1.156 2.548 0.781 -1.545 -0.520 0.000 0.922 0.956 1.528 0.722 0.699 0.778 0.901
0 1.352 -0.707 1.284 0.665 0.580 0.694 -1.040 -0.899 2.173 0.692 -2.048 0.029 0.000 0.545 -2.042 1.259 0.000 0.661 -0.808 -1.251 3.102 0.845 0.991 0.979 0.662 0.225 0.685 0.769
1 1.057 -1.561 -0.411 0.952 -0.681 1.236 -1.107 1.045 2.173 1.288 -2.521 -0.521 0.000 1.361 -1.239 1.546 0.000 0.373 -1.540 0.028 0.000 0.794 0.782 0.987 0.889 0.832 0.972 0.828
0 1.118 -0.017 -1.227 1.077 1.256 0.714 0.624 -0.811 0.000 0.800 0.704 0.387 1.107 0.604 0.234 0.986 0.000 1.306 -0.456 0.094 3.102 0.828 0.984 1.195 0.987 0.672 0.774 0.748
1 0.602 2.201 0.212 0.119 0.182 0.474 2.130 1.270 0.000 0.370 2.088 -0.573 0.000 0.780 -0.725 -1.033 0.000 1.642 0.598 0.303 3.102 0.886 0.988 0.985 0.644 0.756 0.651 0.599
0 1.677 -0.844 1.581 0.585 0.887 1.012 -2.315 0.752 0.000 1.077 0.748 -0.195 0.000 0.718 0.832 -1.337 1.274 1.181 -0.557 -1.006 3.102 1.018 1.247 0.988 0.908 0.651 1.311 1.120
1 1.695 0.259 1.224 1.344 1.067 0.718 -1.752 -0.215 0.000 0.473 0.991 -0.993 0.000 0.891 1.285 -1.500 2.548 0.908 -0.131 0.288 0.000 0.945 0.824 0.979 1.009 0.951 0.934 0.833
0 0.793 0.628 0.432 1.707 0.302 0.919 1.045 -0.784 0.000 1.472 0.175 -1.284 2.215 1.569 0.155 0.971 2.548 0.435 0.735 1.625 0.000 0.801 0.907 0.992 0.831 1.446 1.082 1.051
1 0.537 -0.664 -0.244 1.104 1.272 1.154 0.394 1.633 0.000 1.527 0.963 0.559 2.215 1.744 0.650 -0.912 0.000 1.097 0.730 -0.368 3.102 1.953 1.319 1.045 1.309 0.869 1.196 1.126
1 0.585 -1.469 1.005 0.749 -1.060 1.224 -0.717 -0.323 2.173 1.012 -0.201 1.268 0.000 0.359 -0.567 0.476 0.000 1.117 -1.124 1.557 3.102 0.636 1.281 0.986 0.616 1.289 0.890 0.881
1 0.354 -1.517 0.667 2.534 -1.298 1.020 -0.375 1.254 0.000 1.119 -0.060 -1.538 2.215 1.059 -0.395 -0.140 0.000 2.609 0.199 -0.778 1.551 0.957 0.975 1.286 1.666 1.003 1.224 1.135
1 0.691 -1.619 -1.380 0.361 1.727 1.493 -1.093 -0.289 0.000 1.447 -0.640 1.341 0.000 1.453 -0.617 -1.456 1.274 1.061 -1.481 -0.091 0.000 0.744 0.649 0.987 0.596 0.727 0.856 0.797
0 1.336 1.293 -1.359 0.357 0.067 1.110 -0.058 -0.515 0.000 0.976 1.498 1.207 0.000 1.133 0.437 1.053 2.548 0.543 1.374 0.171 0.000 0.764 0.761 0.984 0.827 0.553 0.607 0.612
0 0.417 -1.111 1.661 2.209 -0.683 1.931 -0.642 0.959 1.087 1.514 -2.032 -0.686 0.000 1.521 -0.539 1.344 0.000 0.978 -0.866 0.363 1.551 2.813 1.850 1.140 1.854 0.799 1.600 1.556
0 1.058 0.390 -0.591 0.134 1.149 0.346 -1.550 0.186 0.000 1.108 -0.999 0.843 1.107 1.124 0.415 -1.514 0.000 1.067 -0.426 -1.000 3.102 1.744 1.050 0.985 1.006 1.010 0.883 0.789
1 1.655 0.253 1.216 0.270 1.703 0.500 -0.006 -1.418 2.173 0.690 -0.350 0.170 2.215 1.045 -0.924 -0.774 0.000 0.996 -0.745 -0.123 0.000 0.839 0.820 0.993 0.921 0.869 0.725 0.708
0 1.603 -0.850 0.564 0.829 0.093 1.270 -1.113 -1.155 2.173 0.853 -1.021 1.248 2.215 0.617 -1.270 1.733 0.000 0.935 -0.092 0.136 0.000 1.011 1.074 0.977 0.823 1.269 1.054 0.878
0 1.568 -0.792 1.005 0.545 0.896 0.895 -1.698 -0.988 0.000 0.608 -1.634 1.705 0.000 0.826 0.208 0.618 1.274 2.063 -1.743 -0.520 0.000 0.939 0.986 0.990 0.600 0.435 1.033 1.087
0 0.489 -1.335 -1.102 1.738 1.028 0.628 -0.992 -0.627 0.000 0.652 -0.064 -0.215 0.000 1.072 0.173 -1.251 2.548 1.042 0.057 0.841 3.102 0.823 0.895 1.200 1.164 0.770 0.837 0.846
1 1.876 0.870 1.234 0.556 -1.262 1.764 0.855 -0.467 2.173 1.079 1.351 0.852 0.000 0.773 0.383 0.874 0.000 1.292 0.829 -1.228 3.102 0.707 0.969 1.102 1.601 1.017 1.112 1.028
0 1.033 0.407 -0.374 0.705 -1.254 0.690 -0.231 1.502 2.173 0.433 -2.009 -0.057 0.000 0.861 1.151 0.334 0.000 0.960 -0.839 1.299 3.102 2.411 1.480 0.982 0.995 0.377 1.012 0.994
0 1.092 0.653 -0.801 0.463 0.426 0.529 -1.055 0.040 0.000 0.663 0.999 1.255 1.107 0.749 -1.106 1.185 2.548 0.841 -0.745 -1.029 0.000 0.841 0.743 0.988 0.750 1.028 0.831 0.868
1 0.799 -0.285 -0.011 0.531 1.392 1.063 0.854 0.494 2.173 1.187 -1.065 -0.851 0.000 0.429 -0.296 1.072 0.000 0.942 -1.985 1.172 0.000 0.873 0.693 0.992 0.819 0.689 1.131 0.913
0 0.503 1.973 -0.377 1.515 -1.514 0.708 1.081 -0.313 2.173 1.110 -0.417 0.839 0.000 0.712 -1.153 1.165 0.000 0.675 -0.303 -0.930 1.551 0.709 0.761 1.032 0.986 0.698 0.963 1.291
0 0.690 -0.574 -1.608 1.182 1.118 0.557 -2.243 0.144 0.000 0.969 0.216 -1.383 1.107 1.054 0.888 -0.709 2.548 0.566 1.663 -0.550 0.000 0.752 1.528 0.987 1.408 0.740 1.290 1.123
1 0.890 1.501 0.786 0.779 -0.615 1.126 0.716 1.541 2.173 0.887 0.728 -0.673 2.215 1.216 0.332 -0.020 0.000 0.965 1.828 0.101 0.000 0.827 0.715 1.099 1.088 1.339 0.924 0.878
0 0.566 0.883 0.655 1.600 0.034 1.155 2.028 -1.499 0.000 0.723 -0.871 0.763 0.000 1.286 -0.696 -0.676 2.548 1.134 -0.113 1.207 3.102 4.366 2.493 0.984 0.960 0.962 1.843 1.511
0 1.146 1.086 -0.911 0.838 1.298 0.821 0.127 -0.145 0.000 1.352 0.474 -1.580 2.215 1.619 -0.081 0.675 2.548 1.382 -0.748 0.127 0.000 0.958 0.976 1.239 0.876 1.481 1.116 1.076
0 1.739 -0.326 -1.661 0.420 -1.705 1.193 -0.031 -1.212 2.173 1.783 -0.442 0.522 0.000 1.064 -0.692 0.027 0.000 1.314 0.359 -0.037 3.102 0.968 0.897 0.986 0.907 1.196 1.175 1.112
1 0.669 0.194 -0.703 0.657 -0.260 0.899 -2.511 0.311 0.000 1.482 0.773 0.974 2.215 3.459 0.037 -1.299 1.274 2.113 0.067 1.516 0.000 0.740 0.871 0.979 1.361 2.330 1.322 1.046
1 1.355 -1.033 -1.173 0.552 -0.048 0.899 -0.482 -1.287 2.173 1.422 -1.227 0.390 1.107 1.937 -0.028 0.914 0.000 0.849 -0.230 -1.734 0.000 0.986 1.224 1.017 1.051 1.788 1.150 1.009
1 0.511 -0.202 1.029 0.780 1.154 0.816 0.532 -0.731 0.000 0.757 0.517 0.749 2.215 1.302 0.289 -1.188 0.000 0.584 1.211 -0.350 0.000 0.876 0.943 0.995 0.963 0.256 0.808 0.891
1 1.109 0.572 1.484 0.753 1.543 1.711 -0.145 -0.746 1.087 1.759 0.631 0.845 2.215 0.945 0.542 0.003 0.000 0.378 -1.150 -0.044 0.000 0.764 1.042 0.992 1.045 2.736 1.441 1.140
0 0.712 -0.025 0.553 0.928 -0.711 1.304 0.045 -0.300 0.000 0.477 0.720 0.969 0.000 1.727 -0.474 1.328 1.274 1.282 2.222 1.684 0.000 0.819 0.765 1.023 0.961 0.657 0.799 0.744
1 1.131 -0.302 1.079 0.901 0.236 0.904 -0.249 1.694 2.173 1.507 -0.702 -1.128 0.000 0.774 0.565 0.284 2.548 1.802 1.446 -0.192 0.000 3.720 2.108 0.986 0.930 1.101 1.484 1.238
0 1.392 1.253 0.118 0.864 -1.358 0.922 -0.447 -1.243 1.087 1.969 1.031 0.774 2.215 1.333 -0.359 -0.681 0.000 1.099 -0.257 1.473 0.000 1.246 0.909 1.475 1.234 2.531 1.449 1.306
0 1.374 2.291 -0.479 1.339 -0.243 0.687 2.345 1.310 0.000 0.467 1.081 0.772 0.000 0.656 1.155 -1.636 2.548 0.592 0.536 -1.269 3.102 0.981 0.821 1.010 0.877 0.217 0.638 0.758
1 0.401 -1.516 0.909 2.738 0.519 0.887 0.566 -1.202 0.000 0.909 -0.176 1.682 0.000 2.149 -0.878 -0.514 2.548 0.929 -0.563 -1.555 3.102 1.228 0.803 0.980 1.382 0.884 1.025 1.172
1 0.430 -1.589 1.417 2.158 1.226 1.180 -0.829 -0.781 2.173 0.798 1.400 -0.111 0.000 0.939 -0.878 1.076 2.548 0.576 1.335 -0.826 0.000 0.861 0.970 0.982 1.489 1.308 1.015 0.992
1 1.943 -0.391 -0.840 0.621 -1.613 2.026 1.734 1.025 0.000 0.930 0.573 -0.912 0.000 1.326 0.847 -0.220 1.274 1.181 0.079 0.709 3.102 1.164 1.007 0.987 1.094 0.821 0.857 0.786
1 0.499 0.436 0.887 0.859 1.509 0.733 -0.559 1.111 1.087 1.011 -0.796 0.279 2.215 1.472 -0.510 -0.982 0.000 1.952 0.379 -0.733 0.000 1.076 1.358 0.991 0.589 0.879 1.068 0.922
0 0.998 -0.407 -1.711 0.139 0.652 0.810 -0.331 -0.721 0.000 0.471 -0.533 0.442 0.000 0.531 -1.405 0.120 2.548 0.707 0.098 -1.176 1.551 1.145 0.809 0.988 0.529 0.612 0.562 0.609
1 1.482 0.872 0.638 1.288 0.362 0.856 0.900 -0.511 1.087 1.072 1.061 -1.432 2.215 1.770 -2.292 -1.547 0.000 1.131 1.374 0.783 0.000 6.316 4.381 1.002 1.317 1.048 2.903 2.351
1 2.084 -0.422 1.289 1.125 0.735 1.104 -0.518 -0.326 2.173 0.413 -0.719 -0.699 0.000 0.857 0.108 -1.631 0.000 0.527 0.641 -1.362 3.102 0.791 0.952 1.016 0.776 0.856 0.987 0.836
0 0.464 0.674 0.025 0.430 -1.703 0.982 -1.311 -0.808 2.173 1.875 1.060 0.821 2.215 0.954 -0.480 -1.677 0.000 0.567 0.702 -0.939 0.000 0.781 1.076 0.989 1.256 3.632 1.652 1.252
1 0.457 -1.944 -1.010 1.409 0.931 1.098 -0.742 -0.415 0.000 1.537 -0.834 0.945 2.215 1.752 -0.287 -1.269 2.548 0.692 -1.537 -0.223 0.000 0.801 1.192 1.094 1.006 1.659 1.175 1.122
0 3.260 -0.943 1.737 0.920 1.309 0.946 -0.139 -0.271 2.173 0.994 -0.952 -0.311 0.000 0.563 -0.136 -0.881 0.000 1.236 -0.507 0.906 1.551 0.747 0.869 0.985 1.769 1.034 1.179 1.042
0 0.615 -0.778 0.246 1.861 1.619 0.560 -0.943 -0.204 2.173 0.550 -0.759 -1.342 2.215 0.578 0.076 -0.973 0.000 0.939 0.035 0.680 0.000 0.810 0.747 1.401 0.772 0.702 0.719 0.662
1 2.370 -0.064 -0.237 1.737 0.154 2.319 -1.838 -1.673 0.000 1.053 -1.305 -0.075 0.000 0.925 0.149 0.318 1.274 0.851 -0.922 0.981 3.102 0.919 0.940 0.989 0.612 0.598 1.219 1.626
1 1.486 0.311 -1.262 1.354 -0.847 0.886 -0.158 1.213 2.173 1.160 -0.218 0.239 0.000 1.166 0.494 0.278 2.548 0.575 1.454 -1.701 0.000 0.429 1.129 0.983 1.111 1.049 1.006 0.920
1 1.294 1.587 -0.864 0.487 -0.312 0.828 1.051 -0.031 1.087 2.443 1.216 1.609 2.215 1.167 0.813 0.921 0.000 1.751 -0.415 0.119 0.000 1.015 1.091 0.974 1.357 2.093 1.178 1.059
1 0.984 0.465 -1.661 0.379 -0.554 0.977 0.237 0.365 0.000 0.510 0.143 1.101 0.000 1.099 -0.662 -1.593 2.548 1.104 -0.197 -0.648 3.102 0.925 0.922 0.986 0.642 0.667 0.806 0.722
1 0.930 -0.009 0.047 0.667 1.367 1.065 -0.231 0.815 0.000 1.199 -1.114 -0.877 2.215 0.940 0.824 -1.583 0.000 1.052 -0.407 -0.076 1.551 1.843 1.257 1.013 1.047 0.751 1.158 0.941
0 0.767 -0.011 -0.637 0.341 -1.437 1.438 -0.425 -0.450 2.173 1.073 -0.718 1.341 2.215 0.633 -1.394 0.486 0.000 0.603 -1.945 -1.626 0.000 0.703 0.790 0.984 1.111 1.848 1.129 1.072
1 1.779 0.017 0.432 0.402 1.022 0.959 1.480 1.595 2.173 1.252 1.365 0.006 0.000 1.188 -0.174 -1.107 0.000 1.181 0.518 -0.258 0.000 1.057 0.910 0.991 1.616 0.779 1.158 1.053
0 0.881 0.630 1.029 1.990 0.508 1.102 0.742 -1.298 2.173 1.565 1.085 0.686 2.215 2.691 1.391 -0.904 0.000 0.499 1.388 -1.199 0.000 0.347 0.861 0.997 0.881 1.920 1.233 1.310
0 1.754 -0.266 0.389 0.347 -0.030 0.462 -1.408 -0.957 2.173 0.515 -2.341 -1.700 0.000 0.588 -0.797 1.355 2.548 0.608 0.329 -1.389 0.000 1.406 0.909 0.988 0.760 0.593 0.768 0.847
0 1.087 0.311 -1.447 0.173 0.567 0.854 0.362 0.584 0.000 1.416 -0.716 -1.211 2.215 0.648 -0.358 -0.692 1.274 0.867 -0.513 0.206 0.000 0.803 0.813 0.984 1.110 0.491 0.921 0.873
0 0.279 1.114 -1.190 3.004 -0.738 1.233 0.896 1.092 2.173 0.454 -0.374 0.117 2.215 0.357 0.119 1.270 0.000 0.458 1.343 0.316 0.000 0.495 0.540 0.988 1.715 1.139 1.618 1.183
1 1.773 -0.694 -1.518 2.306 -1.200 3.104 0.749 0.362 0.000 1.871 0.230 -1.686 2.215 0.805 -0.179 -0.871 1.274 0.910 0.607 -0.246 0.000 1.338 1.598 0.984 1.050 0.919 1.678 1.807
0 0.553 0.683 0.827 0.973 -0.706 1.488 0.149 1.140 2.173 1.788 0.447 -0.478 0.000 0.596 1.043 1.607 0.000 0.373 -0.868 -1.308 1.551 1.607 1.026 0.998 1.134 0.808 1.142 0.936
1 0.397 1.101 -1.139 1.688 0.146 0.972 0.541 1.518 0.000 1.549 -0.873 -1.012 0.000 2.282 -0.151 0.314 2.548 1.174 0.033 -1.368 0.000 0.937 0.776 1.039 1.143 0.959 0.986 1.013
1 0.840 1.906 -0.959 0.869 0.576 0.642 0.554 -1.351 0.000 0.756 0.923 -0.823 2.215 1.251 1.130 0.545 2.548 1.513 0.410 1.073 0.000 1.231 0.985 1.163 0.812 0.987 0.816 0.822
1 0.477 1.665 0.814 0.763 -0.382 0.828 -0.008 0.280 2.173 1.213 -0.001 1.560 0.000 1.136 0.311 -1.289 0.000 0.797 1.091 -0.616 3.102 1.026 0.964 0.992 0.772 0.869 0.916 0.803
0 2.655 0.020 0.273 1.464 0.482 1.709 -0.107 -1.456 2.173 0.825 0.141 -0.386 0.000 1.342 -0.592 1.635 1.274 0.859 -0.175 -0.874 0.000 0.829 0.946 1.003 2.179 0.836 1.505 1.176
0 0.771 -1.992 -0.720 0.732 -1.464 0.869 -1.290 0.388 2.173 0.926 -1.072 -1.489 2.215 0.640 -1.232 0.840 0.000 0.528 -2.440 -0.446 0.000 0.811 0.868 0.993 0.995 1.317 0.809 0.714
0 1.357 1.302 0.076 0.283 -1.060 0.783 1.559 -0.994 0.000 0.947 1.212 1.617 0.000 1.127 0.311 0.442 2.548 0.582 -0.052 1.186 1.551 1.330 0.995 0.985 0.846 0.404 0.858 0.815
0 0.442 -0.381 -0.424 1.244 0.591 0.731 0.605 -0.713 2.173 0.629 2.762 1.040 0.000 0.476 2.693 -0.617 0.000 0.399 0.442 1.486 3.102 0.839 0.755 0.988 0.869 0.524 0.877 0.918
0 0.884 0.422 0.055 0.818 0.624 0.950 -0.763 1.624 0.000 0.818 -0.609 -1.166 0.000 1.057 -0.528 1.070 2.548 1.691 -0.124 -0.335 3.102 1.104 0.933 0.985 0.913 1.000 0.863 1.056
0 1.276 0.156 1.714 1.053 -1.189 0.672 -0.464 -0.030 2.173 0.469 -2.483 0.442 0.000 0.564 2.580 -0.253 0.000 0.444 -0.628 1.080 1.551 5.832 2.983 0.985 1.162 0.494 1.809 1.513
0 1.106 -0.556 0.406 0.573 -1.400 0.769 -0.518 1.457 2.173 0.743 -0.352 -0.010 0.000 1.469 -0.550 -0.930 2.548 0.540 1.236 -0.571 0.000 0.962 0.970 1.101 0.805 1.107 0.873 0.773
0 0.539 -0.964 -0.464 1.371 -1.606 0.667 -0.160 0.655 0.000 0.952 0.352 -0.740 2.215 0.952 0.007 1.123 0.000 1.061 -0.505 1.389 3.102 1.063 0.991 1.019 0.633 0.967 0.732 0.799
1 0.533 -0.989 -1.608 0.462 -1.723 1.204 -0.598 -0.098 2.173 1.343 -0.460 1.632 2.215 0.577 0.221 -0.492 0.000 0.628 -0.073 0.472 0.000 0.518 0.880 0.988 1.179 1.874 1.041 0.813
1 1.024 1.075 -0.795 0.286 -1.436 1.365 0.857 -0.309 2.173 0.804 1.532 1.435 0.000 1.511 0.722 1.494 0.000 1.778 0.903 0.753 1.551 0.686 0.810 0.999 0.900 1.360 1.133 0.978
1 2.085 -0.269 -1.423 0.789 1.298 0.281 1.652 0.187 0.000 0.658 -0.760 -0.042 2.215 0.663 0.024 0.120 0.000 0.552 -0.299 -0.428 3.102 0.713 0.811 1.130 0.705 0.218 0.675 0.743
1 0.980 -0.443 0.813 0.785 -1.253 0.719 0.448 -1.458 0.000 1.087 0.595 0.635 1.107 1.428 0.029 -0.995 0.000 1.083 1.562 -0.092 0.000 0.834 0.891 1.165 0.967 0.661 0.880 0.817
1 0.903 -0.733 -0.980 0.634 -0.639 0.780 0.266 -0.287 2.173 1.264 -0.936 1.004 0.000 1.002 -0.056 -1.344 2.548 1.183 -0.098 1.169 0.000 0.733 1.002 0.985 0.711 0.916 0.966 0.875
0 0.734 -0.304 -1.175 2.851 1.674 0.904 -0.634 0.412 2.173 1.363 -1.050 -0.282 0.000 1.476 -1.603 0.103 0.000 2.231 -0.718 1.708 3.102 0.813 0.896 1.088 0.686 1.392 1.033 1.078
1 1.680 0.591 -0.243 0.111 -0.478 0.326 -0.079 -1.555 2.173 0.711 0.714 0.922 2.215 0.355 0.858 1.682 0.000 0.727 1.620 1.360 0.000 0.334 0.526 1.001 0.862 0.633 0.660 0.619
1 1.163 0.225 -0.202 0.501 -0.979 1.609 -0.938 1.424 0.000 1.224 -0.118 -1.274 0.000 2.034 1.241 -0.254 0.000 1.765 0.536 0.237 3.102 0.894 0.838 0.988 0.693 0.579 0.762 0.726
|
||||
0 1.223 1.232 1.471 0.489 1.728 0.703 -0.111 0.411 0.000 1.367 1.014 -1.294 1.107 1.524 -0.414 -0.164 2.548 1.292 0.833 0.316 0.000 0.861 0.752 0.994 0.836 1.814 1.089 0.950
|
||||
0 0.816 1.637 -1.557 1.036 -0.342 0.913 1.333 0.949 2.173 0.812 0.756 -0.628 2.215 1.333 0.470 1.495 0.000 1.204 -2.222 -1.675 0.000 1.013 0.924 1.133 0.758 1.304 0.855 0.860
|
||||
0 0.851 -0.564 -0.691 0.692 1.345 1.219 1.014 0.318 0.000 1.422 -0.262 -1.635 2.215 0.531 1.802 0.008 0.000 0.508 0.515 -1.267 3.102 0.821 0.787 1.026 0.783 0.432 1.149 1.034
|
||||
0 0.800 -0.599 0.204 0.552 -0.484 0.974 0.413 0.961 2.173 1.269 -0.984 -1.039 2.215 0.380 -1.213 1.371 0.000 0.551 0.332 -0.659 0.000 0.694 0.852 0.984 1.057 2.037 1.096 0.846
|
||||
0 0.744 -0.071 -0.255 0.638 0.512 1.125 0.407 0.844 2.173 0.860 -0.481 -0.677 0.000 1.102 0.181 -1.194 0.000 1.011 -1.081 -1.713 3.102 0.854 0.862 0.982 1.111 1.372 1.042 0.920
|
||||
1 0.400 1.049 -0.625 0.880 -0.407 1.040 2.150 -1.359 0.000 0.747 -0.144 0.847 2.215 0.560 -1.829 0.698 0.000 1.663 -0.668 0.267 0.000 0.845 0.964 0.996 0.820 0.789 0.668 0.668
|
||||
0 1.659 -0.705 -1.057 1.803 -1.436 1.008 0.693 0.005 0.000 0.895 -0.007 0.681 1.107 1.085 0.125 1.476 2.548 1.214 1.068 0.486 0.000 0.867 0.919 0.986 1.069 0.692 1.026 1.313
|
||||
0 0.829 -0.153 0.861 0.615 -0.548 0.589 1.077 -0.041 2.173 1.056 0.763 -1.737 0.000 0.639 0.970 0.725 0.000 0.955 1.227 -0.799 3.102 1.020 1.024 0.985 0.750 0.525 0.685 0.671
|
||||
1 0.920 -0.806 -0.840 1.048 0.278 0.973 -0.077 -1.364 2.173 1.029 0.309 0.133 0.000 1.444 1.484 1.618 1.274 1.419 -0.482 0.417 0.000 0.831 1.430 1.151 1.829 1.560 1.343 1.224
|
||||
1 0.686 0.249 -0.905 0.343 -1.731 0.724 -2.823 -0.901 0.000 0.982 0.303 1.312 1.107 1.016 0.245 0.610 0.000 1.303 -0.557 -0.360 3.102 1.384 1.030 0.984 0.862 1.144 0.866 0.779
|
||||
0 1.603 0.444 0.508 0.586 0.401 0.610 0.467 -1.735 2.173 0.914 0.626 -1.019 0.000 0.812 0.422 -0.408 2.548 0.902 1.679 1.490 0.000 1.265 0.929 0.990 1.004 0.816 0.753 0.851
|
||||
1 0.623 0.780 -0.203 0.056 0.015 0.899 0.793 1.326 1.087 0.803 1.478 -1.499 2.215 1.561 1.492 -0.120 0.000 0.904 0.795 0.137 0.000 0.548 1.009 0.850 0.924 0.838 0.914 0.860
|
||||
0 1.654 -2.032 -1.160 0.859 -1.583 0.689 -1.965 0.891 0.000 0.646 -1.014 -0.288 2.215 0.630 -0.815 0.402 0.000 0.638 0.316 0.655 3.102 0.845 0.879 0.993 1.067 0.625 1.041 0.958
|
||||
1 0.828 -1.269 -1.203 0.744 -0.213 0.626 -1.017 -0.404 0.000 1.281 -0.931 1.733 2.215 0.699 -0.351 1.287 0.000 1.251 -1.171 0.197 0.000 0.976 1.186 0.987 0.646 0.655 0.733 0.671
|
||||
1 0.677 0.111 1.090 1.580 1.591 1.560 0.654 -0.341 2.173 0.794 -0.266 0.702 0.000 0.823 0.651 -1.239 2.548 0.730 1.467 -1.530 0.000 1.492 1.023 0.983 1.909 1.022 1.265 1.127
|
||||
1 0.736 0.882 -1.060 0.589 0.168 1.663 0.781 1.022 2.173 2.025 1.648 -1.292 0.000 1.240 0.924 -0.421 1.274 1.354 0.065 0.501 0.000 0.316 0.925 0.988 0.664 1.736 0.992 0.807
|
||||
1 1.040 -0.822 1.638 0.974 -0.674 0.393 0.830 0.011 2.173 0.770 -0.140 -0.402 0.000 0.294 -0.133 0.030 0.000 1.220 0.807 0.638 0.000 0.826 1.063 1.216 1.026 0.705 0.934 0.823
|
||||
1 0.711 0.602 0.048 1.145 0.966 0.934 0.263 -1.589 2.173 0.971 -0.496 -0.421 1.107 0.628 -0.865 0.845 0.000 0.661 -0.008 -0.565 0.000 0.893 0.705 0.988 0.998 1.339 0.908 0.872
|
||||
1 0.953 -1.651 -0.167 0.885 1.053 1.013 -1.239 0.133 0.000 1.884 -1.122 1.222 2.215 1.906 -0.860 -1.184 1.274 1.413 -0.668 -1.647 0.000 1.873 1.510 1.133 1.050 1.678 1.246 1.061
|
||||
1 0.986 -0.892 -1.380 0.917 1.134 0.950 -1.162 -0.469 0.000 0.569 -1.393 0.215 0.000 0.320 2.667 1.712 0.000 1.570 -0.375 1.457 3.102 0.925 1.128 1.011 0.598 0.824 0.913 0.833
|
||||
1 1.067 0.099 1.154 0.527 -0.789 1.085 0.623 -1.602 2.173 1.511 -0.230 0.022 2.215 0.269 -0.377 0.883 0.000 0.571 -0.540 -0.512 0.000 0.414 0.803 1.022 0.959 2.053 1.041 0.780
|
||||
0 0.825 -2.118 0.217 1.453 -0.493 0.819 0.313 -0.942 0.000 2.098 -0.725 1.096 2.215 0.484 1.336 1.458 0.000 0.482 0.100 1.163 0.000 0.913 0.536 0.990 1.679 0.957 1.095 1.143
|
||||
1 1.507 0.054 1.120 0.698 -1.340 0.912 0.384 0.015 1.087 0.720 0.247 -0.820 0.000 0.286 0.154 1.578 2.548 0.629 1.582 -0.576 0.000 0.828 0.893 1.136 0.514 0.632 0.699 0.709
|
||||
1 0.610 1.180 -0.993 0.816 0.301 0.932 0.758 1.539 0.000 0.726 -0.830 0.248 2.215 0.883 0.857 -1.305 0.000 1.338 1.009 -0.252 3.102 0.901 1.074 0.987 0.875 1.159 1.035 0.858
|
||||
1 1.247 -1.360 1.502 1.525 -1.332 0.618 1.063 0.755 0.000 0.582 -0.155 0.473 2.215 1.214 -0.422 -0.551 2.548 0.838 -1.171 -1.166 0.000 2.051 1.215 1.062 1.091 0.725 0.896 1.091
|
||||
0 0.373 -0.600 1.291 2.573 0.207 0.765 -0.209 1.667 0.000 0.668 0.724 -1.499 0.000 1.045 -0.338 -0.754 2.548 0.558 -0.469 0.029 3.102 0.868 0.939 1.124 0.519 0.383 0.636 0.838
|
||||
0 0.791 0.336 -0.307 0.494 1.213 1.158 0.336 1.081 2.173 0.918 1.289 -0.449 0.000 0.735 -0.521 -0.969 0.000 1.052 0.499 -1.188 3.102 0.699 1.013 0.987 0.622 1.050 0.712 0.661
|
||||
0 1.321 0.856 0.464 0.202 0.901 1.144 0.120 -1.651 0.000 0.803 0.577 -0.509 2.215 0.695 -0.114 0.423 2.548 0.621 1.852 -0.420 0.000 0.697 0.964 0.983 0.527 0.659 0.719 0.729
|
||||
0 0.563 2.081 0.913 0.982 -0.533 0.549 -0.481 -1.730 0.000 0.962 0.921 0.569 2.215 0.731 1.184 -0.679 1.274 0.918 0.931 -1.432 0.000 1.008 0.919 0.993 0.895 0.819 0.810 0.878
|
||||
1 1.148 0.345 0.953 0.921 0.617 0.991 1.103 -0.484 0.000 0.970 1.978 1.525 0.000 1.150 0.689 -0.757 2.548 0.517 0.995 1.245 0.000 1.093 1.140 0.998 1.006 0.756 0.864 0.838
|
||||
1 1.400 0.128 -1.695 1.169 1.070 1.094 -0.345 -0.249 0.000 1.224 0.364 -0.036 2.215 1.178 0.530 -1.544 0.000 1.334 0.933 1.604 0.000 0.560 1.267 1.073 0.716 0.780 0.832 0.792
|
||||
0 0.330 -2.133 1.403 0.628 0.379 1.686 -0.995 0.030 1.087 2.071 0.127 -0.457 0.000 4.662 -0.855 1.477 0.000 2.072 -0.917 -1.416 3.102 5.403 3.074 0.977 0.936 1.910 2.325 1.702
|
||||
0 0.989 0.473 0.968 1.970 1.368 0.844 0.574 -0.290 2.173 0.866 -0.345 -1.019 0.000 1.130 0.605 -0.752 0.000 0.956 -0.888 0.870 3.102 0.885 0.886 0.982 1.157 1.201 1.100 1.068
|
||||
1 0.773 0.418 0.753 1.388 1.070 1.104 -0.378 -0.758 0.000 1.027 0.397 -0.496 2.215 1.234 0.027 1.084 2.548 0.936 0.209 1.677 0.000 1.355 1.020 0.983 0.550 1.206 0.916 0.931
|
||||
0 0.319 2.015 1.534 0.570 -1.134 0.632 0.124 0.757 0.000 0.477 0.598 -1.109 1.107 0.449 0.438 -0.755 2.548 0.574 -0.659 0.691 0.000 0.440 0.749 0.985 0.517 0.158 0.505 0.522
|
||||
0 1.215 1.453 -1.386 1.276 1.298 0.643 0.570 -0.196 2.173 0.588 2.104 0.498 0.000 0.617 -0.296 -0.801 2.548 0.452 0.110 0.313 0.000 0.815 0.953 1.141 1.166 0.547 0.892 0.807
|
||||
1 1.257 -1.869 -0.060 0.265 0.653 1.527 -0.346 1.163 2.173 0.758 -2.119 -0.604 0.000 1.473 -1.133 -1.290 2.548 0.477 -0.428 -0.066 0.000 0.818 0.841 0.984 1.446 1.729 1.211 1.054
|
||||
1 1.449 0.464 1.585 1.418 -1.488 1.540 0.942 0.087 0.000 0.898 0.402 -0.631 2.215 0.753 0.039 -1.729 0.000 0.859 0.849 -1.054 0.000 0.791 0.677 0.995 0.687 0.527 0.703 0.606
|
||||
1 1.084 -1.997 0.900 1.333 1.024 0.872 -0.864 -1.500 2.173 1.072 -0.813 -0.421 2.215 0.924 0.478 0.304 0.000 0.992 -0.398 -1.022 0.000 0.741 1.085 0.980 1.221 1.176 1.032 0.961
|
||||
0 1.712 1.129 0.125 1.120 -1.402 1.749 0.951 -1.575 2.173 1.711 0.445 0.578 0.000 1.114 0.234 -1.011 0.000 1.577 -0.088 0.086 3.102 2.108 1.312 1.882 1.597 2.009 1.441 1.308
|
||||
0 0.530 0.248 1.622 1.450 -1.012 1.221 -1.154 -0.763 2.173 1.698 -0.586 0.733 0.000 0.889 1.042 1.038 1.274 0.657 0.008 0.701 0.000 0.430 1.005 0.983 0.930 2.264 1.357 1.146
|
||||
1 0.921 1.735 0.883 0.699 -1.614 0.821 1.463 0.319 1.087 1.099 0.814 -1.600 2.215 1.375 0.702 -0.691 0.000 0.869 1.326 -0.790 0.000 0.980 0.900 0.988 0.832 1.452 0.816 0.709
|
||||
0 2.485 -0.823 -0.297 0.886 -1.404 0.989 0.835 1.615 2.173 0.382 0.588 -0.224 0.000 1.029 -0.456 1.546 2.548 0.613 -0.359 -0.789 0.000 0.768 0.977 1.726 2.007 0.913 1.338 1.180
|
||||
1 0.657 -0.069 -0.078 1.107 1.549 0.804 1.335 -1.630 2.173 1.271 0.481 0.153 1.107 1.028 0.144 -0.762 0.000 1.098 0.132 1.570 0.000 0.830 0.979 1.175 1.069 1.624 1.000 0.868
|
||||
1 2.032 0.329 -1.003 0.493 -0.136 1.159 -0.224 0.750 1.087 0.396 0.546 0.587 0.000 0.620 1.805 0.982 0.000 1.236 0.744 -1.621 0.000 0.930 1.200 0.988 0.482 0.771 0.887 0.779
|
||||
0 0.524 -1.319 0.634 0.471 1.221 0.599 -0.588 -0.461 0.000 1.230 -1.504 -1.517 1.107 1.436 -0.035 0.104 2.548 0.629 1.997 -1.282 0.000 2.084 1.450 0.984 1.084 1.827 1.547 1.213
|
||||
1 0.871 0.618 -1.544 0.718 0.186 1.041 -1.180 0.434 2.173 1.133 1.558 -1.301 0.000 0.452 -0.595 0.522 0.000 0.665 0.567 0.130 3.102 1.872 1.114 1.095 1.398 0.979 1.472 1.168
|
||||
1 3.308 1.037 -0.634 0.690 -0.619 1.975 0.949 1.280 0.000 0.826 0.546 -0.139 2.215 0.635 -0.045 0.427 0.000 1.224 0.112 1.339 3.102 1.756 1.050 0.992 0.738 0.903 0.968 1.238
|
||||
0 0.588 2.104 -0.872 1.136 1.743 0.842 0.638 0.015 0.000 0.481 0.928 1.000 2.215 0.595 0.125 1.429 0.000 0.951 -1.140 -0.511 3.102 1.031 1.057 0.979 0.673 1.064 1.001 0.891
|
||||
0 0.289 0.823 0.013 0.615 -1.601 0.177 2.403 -0.015 0.000 0.258 1.151 1.036 2.215 0.694 0.553 -1.326 2.548 0.411 0.366 0.106 0.000 0.482 0.562 0.989 0.670 0.404 0.516 0.561
|
||||
1 0.294 -0.660 -1.162 1.752 0.384 0.860 0.513 1.119 0.000 2.416 0.107 -1.342 0.000 1.398 0.361 -0.350 2.548 1.126 -0.902 0.040 1.551 0.650 1.125 0.988 0.531 0.843 0.912 0.911
|
||||
0 0.599 -0.616 1.526 1.381 0.507 0.955 -0.646 -0.085 2.173 0.775 -0.533 1.116 2.215 0.789 -0.136 -1.176 0.000 2.449 1.435 -1.433 0.000 1.692 1.699 1.000 0.869 1.119 1.508 1.303
|
||||
1 1.100 -1.174 -1.114 1.601 -1.576 1.056 -1.343 0.547 2.173 0.555 0.367 0.592 2.215 0.580 -1.862 -0.914 0.000 0.904 0.508 -0.444 0.000 1.439 1.105 0.986 1.408 1.104 1.190 1.094
|
||||
1 2.237 -0.701 1.470 0.719 -0.199 0.745 -0.132 -0.737 1.087 0.976 -0.227 0.093 2.215 0.699 0.057 1.133 0.000 0.661 0.573 -0.679 0.000 0.785 0.772 1.752 1.235 0.856 0.990 0.825
|
||||
1 0.455 -0.880 -1.482 1.260 -0.178 1.499 0.158 1.022 0.000 1.867 -0.435 -0.675 2.215 1.234 0.783 1.586 0.000 0.641 -0.454 -0.409 3.102 1.002 0.964 0.986 0.761 0.240 1.190 0.995
|
||||
1 1.158 -0.778 -0.159 0.823 1.641 1.341 -0.830 -1.169 2.173 0.840 -1.554 0.934 0.000 0.693 0.488 -1.218 2.548 1.042 1.395 0.276 0.000 0.946 0.785 1.350 1.079 0.893 1.267 1.151
|
||||
1 0.902 -0.078 -0.055 0.872 -0.012 0.843 1.276 1.739 2.173 0.838 1.492 0.918 0.000 0.626 0.904 -0.648 2.548 0.412 -2.027 -0.883 0.000 2.838 1.664 0.988 1.803 0.768 1.244 1.280
|
||||
1 0.649 -1.028 -1.521 1.097 0.774 1.216 -0.383 -0.318 2.173 1.643 -0.285 -1.705 0.000 0.911 -0.091 0.341 0.000 0.592 0.537 0.732 3.102 0.911 0.856 1.027 1.160 0.874 0.986 0.893
|
||||
1 1.192 1.846 -0.781 1.326 -0.747 1.550 1.177 1.366 0.000 1.196 0.151 0.387 2.215 0.527 2.261 -0.190 0.000 0.390 1.474 0.381 0.000 0.986 1.025 1.004 1.392 0.761 0.965 1.043
|
||||
0 0.438 -0.358 -1.549 0.836 0.436 0.818 0.276 -0.708 2.173 0.707 0.826 0.392 0.000 1.050 1.741 -1.066 0.000 1.276 -1.583 0.842 0.000 1.475 1.273 0.986 0.853 1.593 1.255 1.226
|
||||
1 1.083 0.142 1.701 0.605 -0.253 1.237 0.791 1.183 2.173 0.842 2.850 -0.082 0.000 0.724 -0.464 -0.694 0.000 1.499 0.456 -0.226 3.102 0.601 0.799 1.102 0.995 1.389 1.013 0.851
|
||||
0 0.828 1.897 -0.615 0.572 -0.545 0.572 0.461 0.464 2.173 0.393 0.356 1.069 2.215 1.840 0.088 1.500 0.000 0.407 -0.663 -0.787 0.000 0.950 0.965 0.979 0.733 0.363 0.618 0.733
|
||||
0 0.735 1.438 1.197 1.123 -0.214 0.641 0.949 0.858 0.000 1.162 0.524 -0.896 2.215 0.992 0.454 -1.475 2.548 0.902 1.079 0.019 0.000 0.822 0.917 1.203 1.032 0.569 0.780 0.764
|
||||
0 0.437 -2.102 0.044 1.779 -1.042 1.231 -0.181 -0.515 1.087 2.666 0.863 1.466 2.215 1.370 0.345 -1.371 0.000 0.906 0.363 1.611 0.000 1.140 1.362 1.013 3.931 3.004 2.724 2.028
|
||||
1 0.881 1.814 -0.987 0.384 0.800 2.384 1.422 0.640 0.000 1.528 0.292 -0.962 1.107 2.126 -0.371 -1.401 2.548 0.700 0.109 0.203 0.000 0.450 0.813 0.985 0.956 1.013 0.993 0.774
|
||||
1 0.630 0.408 0.152 0.194 0.316 0.710 -0.824 -0.358 2.173 0.741 0.535 -0.851 2.215 0.933 0.406 1.148 0.000 0.523 -0.479 -0.625 0.000 0.873 0.960 0.988 0.830 0.921 0.711 0.661
|
||||
1 0.870 -0.448 -1.134 0.616 0.135 0.600 0.649 -0.622 2.173 0.768 0.709 -0.123 0.000 1.308 0.500 1.468 0.000 1.973 -0.286 1.462 3.102 0.909 0.944 0.990 0.835 1.250 0.798 0.776
|
||||
0 1.290 0.552 1.330 0.615 -1.353 0.661 0.240 -0.393 0.000 0.531 0.053 -1.588 0.000 0.675 0.839 -0.345 1.274 1.597 0.020 0.536 3.102 1.114 0.964 0.987 0.783 0.675 0.662 0.675
|
||||
1 0.943 0.936 1.068 1.373 0.671 2.170 -2.011 -1.032 0.000 0.640 0.361 -0.806 0.000 2.239 -0.083 0.590 2.548 1.224 0.646 -1.723 0.000 0.879 0.834 0.981 1.436 0.568 0.916 0.931
|
||||
1 0.431 1.686 -1.053 0.388 1.739 0.457 -0.471 -0.743 2.173 0.786 1.432 -0.547 2.215 0.537 -0.413 1.256 0.000 0.413 2.311 -0.408 0.000 1.355 1.017 0.982 0.689 1.014 0.821 0.715
|
||||
0 1.620 -0.055 -0.862 1.341 -1.571 0.634 -0.906 0.935 2.173 0.501 -2.198 -0.525 0.000 0.778 -0.708 -0.060 0.000 0.988 -0.621 0.489 3.102 0.870 0.956 1.216 0.992 0.336 0.871 0.889
|
||||
1 0.549 0.304 -1.443 1.309 -0.312 1.116 0.644 1.519 2.173 1.078 -0.303 -0.736 0.000 1.261 0.387 0.628 2.548 0.945 -0.190 0.090 0.000 0.893 1.043 1.000 1.124 1.077 1.026 0.886
|
||||
0 0.412 -0.618 -1.486 1.133 -0.665 0.646 0.436 1.520 0.000 0.993 0.976 0.106 2.215 0.832 0.091 0.164 2.548 0.672 -0.650 1.256 0.000 0.695 1.131 0.991 1.017 0.455 1.226 1.087
|
||||
0 1.183 -0.084 1.644 1.389 0.967 0.843 0.938 -0.670 0.000 0.480 0.256 0.123 2.215 0.437 1.644 0.491 0.000 0.501 -0.416 0.101 3.102 1.060 0.804 1.017 0.775 0.173 0.535 0.760
|
||||
0 1.629 -1.486 -0.683 2.786 -0.492 1.347 -2.638 1.453 0.000 1.857 0.208 0.873 0.000 0.519 -1.265 -1.602 1.274 0.903 -1.102 -0.329 1.551 6.892 3.522 0.998 0.570 0.477 2.039 2.006
|
||||
1 2.045 -0.671 -1.235 0.490 -0.952 0.525 -1.252 1.289 0.000 1.088 -0.993 0.648 2.215 0.975 -0.109 -0.254 2.548 0.556 -1.095 -0.194 0.000 0.803 0.861 0.980 1.282 0.945 0.925 0.811
|
||||
0 0.448 -0.058 -0.974 0.945 -1.633 1.181 -1.139 0.266 2.173 1.118 -0.761 1.502 1.107 1.706 0.585 -0.680 0.000 0.487 -1.951 0.945 0.000 2.347 1.754 0.993 1.161 1.549 1.414 1.176
|
||||
0 0.551 0.519 0.448 2.183 1.293 1.220 0.628 -0.627 2.173 1.019 -0.002 -0.652 0.000 1.843 -0.386 1.042 2.548 0.400 -1.102 -1.014 0.000 0.648 0.792 1.049 0.888 2.132 1.262 1.096
|
||||
0 1.624 0.488 1.403 0.760 0.559 0.812 0.777 -1.244 2.173 0.613 0.589 -0.030 2.215 0.692 1.058 0.683 0.000 1.054 1.165 -0.765 0.000 0.915 0.875 1.059 0.821 0.927 0.792 0.721
|
||||
1 0.774 0.444 1.257 0.515 -0.689 0.515 1.448 -1.271 0.000 0.793 0.118 0.811 1.107 0.679 0.326 -0.426 0.000 1.066 -0.865 -0.049 3.102 0.960 1.046 0.986 0.716 0.772 0.855 0.732
|
||||
1 2.093 -1.240 1.615 0.918 -1.202 1.412 -0.541 0.640 1.087 2.019 0.872 -0.639 0.000 0.672 -0.936 0.972 0.000 0.896 0.235 0.212 0.000 0.810 0.700 1.090 0.797 0.862 1.049 0.874
|
||||
1 0.908 1.069 0.283 0.400 1.293 0.609 1.452 -1.136 0.000 0.623 0.417 -0.098 2.215 1.023 0.775 1.054 1.274 0.706 2.346 -1.305 0.000 0.744 1.006 0.991 0.606 0.753 0.796 0.753
|
||||
0 0.403 -1.328 -0.065 0.901 1.052 0.708 -0.354 -0.718 2.173 0.892 0.633 1.684 2.215 0.999 -1.205 0.941 0.000 0.930 1.072 -0.809 0.000 2.105 1.430 0.989 0.838 1.147 1.042 0.883
|
||||
0 1.447 0.453 0.118 1.731 0.650 0.771 0.446 -1.564 0.000 0.973 -2.014 0.354 0.000 1.949 -0.643 -1.531 1.274 1.106 -0.334 -1.163 0.000 0.795 0.821 1.013 1.699 0.918 1.118 1.018
|
||||
1 1.794 0.123 -0.454 0.057 1.489 0.966 -1.190 1.090 1.087 0.539 -0.535 1.035 0.000 1.096 -1.069 -1.236 2.548 0.659 -1.196 -0.283 0.000 0.803 0.756 0.985 1.343 1.109 0.993 0.806
|
||||
0 1.484 -2.047 0.813 0.591 -0.295 0.923 0.312 -1.164 2.173 0.654 -0.316 0.752 2.215 0.599 1.966 -1.128 0.000 0.626 -0.304 -1.431 0.000 1.112 0.910 1.090 0.986 1.189 1.350 1.472
|
||||
0 0.417 -2.016 0.849 1.817 0.040 1.201 -1.676 -1.394 0.000 0.792 0.537 0.641 2.215 0.794 -1.222 0.187 0.000 0.825 -0.217 1.334 3.102 1.470 0.931 0.987 1.203 0.525 0.833 0.827
|
||||
1 0.603 1.009 0.033 0.486 1.225 0.884 -0.617 -1.058 0.000 0.500 -1.407 -0.567 0.000 1.476 -0.876 0.605 2.548 0.970 0.560 1.092 3.102 0.853 1.153 0.988 0.846 0.920 0.944 0.835
|
||||
1 1.381 -0.326 0.552 0.417 -0.027 1.030 -0.835 -1.287 2.173 0.941 -0.421 1.519 2.215 0.615 -1.650 0.377 0.000 0.606 0.644 0.650 0.000 1.146 0.970 0.990 1.191 0.884 0.897 0.826
|
||||
1 0.632 1.200 -0.703 0.438 -1.700 0.779 -0.731 0.958 1.087 0.605 0.393 -1.376 0.000 0.670 -0.827 -1.315 2.548 0.626 -0.501 0.417 0.000 0.904 0.903 0.998 0.673 0.803 0.722 0.640
|
||||
1 1.561 -0.569 1.580 0.329 0.237 1.059 0.731 0.415 2.173 0.454 0.016 -0.828 0.000 0.587 0.008 -0.291 1.274 0.597 1.119 1.191 0.000 0.815 0.908 0.988 0.733 0.690 0.892 0.764
|
||||
1 2.102 0.087 0.449 1.164 -0.390 1.085 -0.408 -1.116 2.173 0.578 0.197 -0.137 0.000 1.202 0.917 1.523 0.000 0.959 -0.832 1.404 3.102 1.380 1.109 1.486 1.496 0.886 1.066 1.025
|
||||
1 1.698 -0.489 -0.552 0.976 -1.009 1.620 -0.721 0.648 1.087 1.481 -1.860 -1.354 0.000 1.142 -1.140 1.401 2.548 1.000 -1.274 -0.158 0.000 1.430 1.130 0.987 1.629 1.154 1.303 1.223
|
||||
1 1.111 -0.249 -1.457 0.421 0.939 0.646 -2.076 0.362 0.000 1.315 0.796 -1.436 2.215 0.780 0.130 0.055 0.000 1.662 -0.834 0.461 0.000 0.920 0.948 0.990 1.046 0.905 1.493 1.169
|
||||
1 0.945 0.390 -1.159 1.675 0.437 0.356 0.261 0.543 1.087 0.574 0.838 1.599 2.215 0.496 -1.220 -0.022 0.000 0.558 -2.454 1.440 0.000 0.763 0.983 1.728 1.000 0.578 0.922 1.003
|
||||
1 2.076 0.014 -1.314 0.854 -0.306 3.446 1.341 0.598 0.000 2.086 0.227 -0.747 2.215 1.564 -0.216 1.649 2.548 0.965 -0.857 -1.062 0.000 0.477 0.734 1.456 1.003 1.660 1.001 0.908
|
||||
1 1.992 0.192 -0.103 0.108 -1.599 0.938 0.595 -1.360 2.173 0.869 -1.012 1.432 0.000 1.302 0.850 0.436 2.548 0.487 1.051 -1.027 0.000 0.502 0.829 0.983 1.110 1.394 0.904 0.836
|
||||
0 0.460 1.625 1.485 1.331 1.242 0.675 -0.329 -1.039 1.087 0.671 -1.028 -0.514 0.000 1.265 -0.788 0.415 1.274 0.570 -0.683 -1.738 0.000 0.725 0.758 1.004 1.024 1.156 0.944 0.833
|
||||
0 0.871 0.839 -1.536 0.428 1.198 0.875 -1.256 -0.466 1.087 0.684 -0.768 0.150 0.000 0.556 -1.793 0.389 0.000 0.942 -1.126 1.339 1.551 0.624 0.734 0.986 1.357 0.960 1.474 1.294
|
||||
1 0.951 1.651 0.576 1.273 1.495 0.834 0.048 -0.578 2.173 0.386 -0.056 -1.448 0.000 0.597 -0.196 0.162 2.548 0.524 1.649 1.625 0.000 0.737 0.901 1.124 1.014 0.556 1.039 0.845
|
||||
1 1.049 -0.223 0.685 0.256 -1.191 2.506 0.238 -0.359 0.000 1.510 -0.904 1.158 1.107 2.733 -0.902 1.679 2.548 0.407 -0.474 -1.572 0.000 1.513 2.472 0.982 1.238 0.978 1.985 1.510
|
||||
0 0.455 -0.028 0.265 1.286 1.373 0.459 0.331 -0.922 0.000 0.343 0.634 0.430 0.000 0.279 -0.084 -0.272 0.000 0.475 0.926 -0.123 3.102 0.803 0.495 0.987 0.587 0.211 0.417 0.445
|
||||
1 2.074 0.388 0.878 1.110 1.557 1.077 -0.226 -0.295 2.173 0.865 -0.319 -1.116 2.215 0.707 -0.835 0.722 0.000 0.632 -0.608 -0.728 0.000 0.715 0.802 1.207 1.190 0.960 1.143 0.926
|
||||
1 1.390 0.265 1.196 0.919 -1.371 1.858 0.506 0.786 0.000 1.280 -1.367 -0.720 2.215 1.483 -0.441 -0.675 2.548 1.076 0.294 -0.539 0.000 1.126 0.830 1.155 1.551 0.702 1.103 0.933
|
||||
1 1.014 -0.079 1.597 1.038 -0.281 1.135 -0.722 -0.177 2.173 0.544 -1.475 -1.501 0.000 1.257 -1.315 1.212 0.000 0.496 -0.060 1.180 1.551 0.815 0.611 1.411 1.110 0.792 0.846 0.853
|
||||
0 0.335 1.267 -1.154 2.011 -0.574 0.753 0.618 1.411 0.000 0.474 0.748 0.681 2.215 0.608 -0.446 -0.354 2.548 0.399 1.295 -0.581 0.000 0.911 0.882 0.975 0.832 0.598 0.580 0.678
|
||||
1 0.729 -0.189 1.182 0.293 1.310 0.412 0.459 -0.632 0.000 0.869 -1.128 -0.625 2.215 1.173 -0.893 0.478 2.548 0.584 -2.394 -1.727 0.000 2.016 1.272 0.995 1.034 0.905 0.966 1.038
|
||||
1 1.225 -1.215 -0.088 0.881 -0.237 0.600 -0.976 1.462 2.173 0.876 0.506 1.583 2.215 0.718 1.228 -0.031 0.000 0.653 -1.292 1.216 0.000 0.838 1.108 0.981 1.805 0.890 1.251 1.197
|
||||
1 2.685 -0.444 0.847 0.253 0.183 0.641 -1.541 -0.873 2.173 0.417 2.874 -0.551 0.000 0.706 -1.431 0.764 0.000 1.390 -0.596 -1.397 0.000 0.894 0.829 0.993 0.789 0.654 0.883 0.746
|
||||
0 0.638 -0.481 0.683 1.457 -1.024 0.707 -1.338 1.498 0.000 0.980 0.518 0.289 2.215 0.964 -0.531 -0.423 0.000 0.694 -0.654 -1.314 3.102 0.807 1.283 1.335 0.658 0.907 0.797 0.772
|
||||
1 1.789 -0.765 -0.732 0.421 -0.020 1.142 -1.353 1.439 2.173 0.725 -1.518 -1.261 0.000 0.812 -2.597 -0.463 0.000 1.203 -0.120 1.001 0.000 0.978 0.673 0.985 1.303 1.400 1.078 0.983
|
||||
1 0.784 -1.431 1.724 0.848 0.559 0.615 -1.643 -1.456 0.000 1.339 -0.513 0.040 2.215 0.394 -2.483 1.304 0.000 0.987 0.889 -0.339 0.000 0.732 0.713 0.987 0.973 0.705 0.875 0.759
|
||||
1 0.911 1.098 -1.289 0.421 0.823 1.218 -0.503 0.431 0.000 0.775 0.432 -1.680 0.000 0.855 -0.226 -0.460 2.548 0.646 -0.947 -1.243 1.551 2.201 1.349 0.985 0.730 0.451 0.877 0.825
|
||||
1 0.959 0.372 -0.269 1.255 0.702 1.151 0.097 0.805 2.173 0.993 1.011 0.767 2.215 1.096 0.185 0.381 0.000 1.001 -0.205 0.059 0.000 0.979 0.997 1.168 0.796 0.771 0.839 0.776
|
||||
0 0.283 -1.864 -1.663 0.219 1.624 0.955 -1.213 0.932 2.173 0.889 0.395 -0.268 0.000 0.597 -1.083 -0.921 2.548 0.584 1.325 -1.072 0.000 0.856 0.927 0.996 0.937 0.936 1.095 0.892
|
||||
0 2.017 -0.488 -0.466 1.029 -0.870 3.157 0.059 -0.343 2.173 3.881 0.872 1.502 1.107 3.631 1.720 0.963 0.000 0.633 -1.264 -1.734 0.000 4.572 3.339 1.005 1.407 5.590 3.614 3.110
|
||||
1 1.088 0.414 -0.841 0.485 0.605 0.860 1.110 -0.568 0.000 1.152 -0.325 1.203 2.215 0.324 1.652 -0.104 0.000 0.510 1.095 -1.728 0.000 0.880 0.722 0.989 0.977 0.711 0.888 0.762
|
||||
0 0.409 -1.717 0.712 0.809 -1.301 0.701 -1.529 -1.411 0.000 1.191 -0.582 0.438 2.215 1.147 0.813 -0.571 2.548 1.039 0.543 0.892 0.000 0.636 0.810 0.986 0.861 1.411 0.907 0.756
|
||||
1 1.094 1.577 -0.988 0.497 -0.149 0.891 -2.459 1.034 0.000 0.646 0.792 -1.022 0.000 1.573 0.254 -0.053 2.548 1.428 0.190 -1.641 3.102 4.322 2.687 0.985 0.881 1.135 1.907 1.831
|
||||
1 0.613 1.993 -0.280 0.544 0.931 0.909 1.526 1.559 0.000 0.840 1.473 -0.483 2.215 0.856 0.352 0.408 2.548 1.058 1.733 -1.396 0.000 0.801 1.066 0.984 0.639 0.841 0.871 0.748
|
||||
0 0.958 -1.202 0.600 0.434 0.170 0.783 -0.214 1.319 0.000 0.835 -0.454 -0.615 2.215 0.658 -1.858 -0.891 0.000 0.640 0.172 -1.204 3.102 1.790 1.086 0.997 0.804 0.403 0.793 0.756
|
||||
1 1.998 -0.238 0.972 0.058 0.266 0.759 1.576 -0.357 2.173 1.004 -0.349 -0.747 2.215 0.962 0.490 -0.453 0.000 1.592 0.661 -1.405 0.000 0.874 1.086 0.990 1.436 1.527 1.177 0.993
|
||||
1 0.796 -0.171 -0.818 0.574 -1.625 1.201 -0.737 1.451 2.173 0.651 0.404 -0.452 0.000 1.150 -0.652 -0.120 0.000 1.008 -0.093 0.531 3.102 0.884 0.706 0.979 1.193 0.937 0.943 0.881
|
||||
1 0.773 1.023 0.527 1.537 -0.201 2.967 -0.574 -1.534 2.173 2.346 -0.307 0.394 2.215 1.393 0.135 -0.027 0.000 3.015 0.187 0.516 0.000 0.819 1.260 0.982 2.552 3.862 2.179 1.786
|
||||
0 1.823 1.008 -1.489 0.234 -0.962 0.591 0.461 0.996 2.173 0.568 -1.297 -0.410 0.000 0.887 2.157 1.194 0.000 2.079 0.369 -0.085 3.102 0.770 0.945 0.995 1.179 0.971 0.925 0.983
|
||||
0 0.780 0.640 0.490 0.680 -1.301 0.715 -0.137 0.152 2.173 0.616 -0.831 1.668 0.000 1.958 0.528 -0.982 2.548 0.966 -1.551 0.462 0.000 1.034 1.079 1.008 0.827 1.369 1.152 0.983
|
||||
1 0.543 0.801 1.543 1.134 -0.772 0.954 -0.849 0.410 1.087 0.851 -1.988 1.686 0.000 0.799 -0.912 -1.156 0.000 0.479 0.097 1.334 0.000 0.923 0.597 0.989 1.231 0.759 0.975 0.867
|
||||
0 1.241 -0.014 0.129 1.158 0.670 0.445 -0.732 1.739 2.173 0.918 0.659 -1.340 2.215 0.557 2.410 -1.404 0.000 0.966 -1.545 -1.120 0.000 0.874 0.918 0.987 1.001 0.798 0.904 0.937
|
||||
0 1.751 -0.266 -1.575 0.489 1.292 1.112 1.533 0.137 2.173 1.204 -0.414 -0.928 0.000 0.879 1.237 -0.415 2.548 1.479 1.469 0.913 0.000 2.884 1.747 0.989 1.742 0.600 1.363 1.293
|
||||
1 1.505 1.208 -1.476 0.995 -0.836 2.800 -1.600 0.111 0.000 2.157 1.241 1.110 2.215 1.076 2.619 -0.913 0.000 1.678 2.204 -1.575 0.000 0.849 1.224 0.990 1.412 0.976 1.271 1.105
|
||||
0 0.816 0.611 0.779 1.694 0.278 0.575 -0.787 1.592 2.173 1.148 1.076 -0.831 2.215 0.421 1.316 0.632 0.000 0.589 0.452 -1.466 0.000 0.779 0.909 0.990 1.146 1.639 1.236 0.949
|
||||
1 0.551 -0.808 0.330 1.188 -0.294 0.447 -0.035 -0.993 0.000 0.432 -0.276 -0.481 2.215 1.959 -0.288 1.195 2.548 0.638 0.583 1.107 0.000 0.832 0.924 0.993 0.723 0.976 0.968 0.895
|
||||
0 1.316 -0.093 0.995 0.860 -0.621 0.593 -0.560 -1.599 2.173 0.524 -0.318 -0.240 2.215 0.566 0.759 -0.368 0.000 0.483 -2.030 -1.104 0.000 1.468 1.041 1.464 0.811 0.778 0.690 0.722
|
||||
1 1.528 0.067 -0.855 0.959 -1.464 1.143 -0.082 1.023 0.000 0.702 -0.763 -0.244 0.000 0.935 -0.881 0.206 2.548 0.614 -0.831 1.657 3.102 1.680 1.105 0.983 1.078 0.559 0.801 0.809
|
||||
0 0.558 -0.833 -0.598 1.436 -1.724 1.316 -0.661 1.593 2.173 1.148 -0.503 -0.132 1.107 1.584 -0.125 0.380 0.000 1.110 -1.216 -0.181 0.000 1.258 0.860 1.053 0.790 1.814 1.159 1.007
|
||||
1 0.819 0.879 1.221 0.598 -1.450 0.754 0.417 -0.369 2.173 0.477 1.199 0.274 0.000 1.073 0.368 0.273 2.548 1.599 2.047 1.690 0.000 0.933 0.984 0.983 0.788 0.613 0.728 0.717
|
||||
0 0.981 -1.007 0.489 0.923 1.261 0.436 -0.698 -0.506 2.173 0.764 -1.105 -1.241 2.215 0.577 -2.573 -0.036 0.000 0.565 -1.628 1.610 0.000 0.688 0.801 0.991 0.871 0.554 0.691 0.656
|
||||
0 2.888 0.568 -1.416 1.461 -1.157 1.756 -0.900 0.522 0.000 0.657 0.409 1.076 2.215 1.419 0.672 -0.019 0.000 1.436 -0.184 -0.980 3.102 0.946 0.919 0.995 1.069 0.890 0.834 0.856
|
||||
1 0.522 1.805 -0.963 1.136 0.418 0.727 -0.195 -1.695 2.173 0.309 2.559 -0.178 0.000 0.521 1.794 0.919 0.000 0.788 0.174 -0.406 3.102 0.555 0.729 1.011 1.385 0.753 0.927 0.832
|
||||
1 0.793 -0.162 -1.643 0.634 0.337 0.898 -0.633 1.689 0.000 0.806 -0.826 -0.356 2.215 0.890 -0.142 -1.268 0.000 1.293 0.574 0.725 0.000 0.833 1.077 0.988 0.721 0.679 0.867 0.753
|
||||
0 1.298 1.098 0.280 0.371 -0.373 0.855 -0.306 -1.186 0.000 0.977 -0.421 1.003 0.000 0.978 0.956 -1.249 2.548 0.735 0.577 -0.037 3.102 0.974 1.002 0.992 0.549 0.587 0.725 0.954
|
||||
1 0.751 -0.520 -1.653 0.168 -0.419 0.878 -1.023 -1.364 2.173 1.310 -0.667 0.863 0.000 1.196 -0.827 0.358 0.000 1.154 -0.165 -0.360 1.551 0.871 0.950 0.983 0.907 0.955 0.959 0.874
|
||||
0 1.730 0.666 -1.432 0.446 1.302 0.921 -0.203 0.621 0.000 1.171 -0.365 -0.611 1.107 0.585 0.807 1.150 0.000 0.415 -0.843 1.311 0.000 0.968 0.786 0.986 1.059 0.371 0.790 0.848
|
||||
1 0.596 -1.486 0.690 1.045 -1.344 0.928 0.867 0.820 2.173 0.610 0.999 -1.329 2.215 0.883 -0.001 -0.106 0.000 1.145 2.184 -0.808 0.000 2.019 1.256 1.056 1.751 1.037 1.298 1.518
|
||||
1 0.656 -1.993 -0.519 1.643 -0.143 0.815 0.256 1.220 1.087 0.399 -1.184 -1.458 0.000 0.738 1.361 -1.443 0.000 0.842 0.033 0.293 0.000 0.910 0.891 0.993 0.668 0.562 0.958 0.787
|
||||
1 1.127 -0.542 0.645 0.318 -1.496 0.661 -0.640 0.369 2.173 0.992 0.358 1.702 0.000 1.004 0.316 -1.109 0.000 1.616 -0.936 -0.707 1.551 0.875 1.191 0.985 0.651 0.940 0.969 0.834
|
||||
0 0.916 -1.423 -1.490 1.248 -0.538 0.625 -0.535 -0.174 0.000 0.769 -0.389 1.608 2.215 0.667 -1.138 -1.738 1.274 0.877 -0.019 0.482 0.000 0.696 0.917 1.121 0.678 0.347 0.647 0.722
|
||||
1 2.756 -0.637 -1.715 1.331 1.124 0.913 -0.296 -0.491 0.000 0.983 -0.831 0.000 2.215 1.180 -0.428 0.742 0.000 1.113 0.005 -1.157 1.551 1.681 1.096 1.462 0.976 0.917 1.009 1.040
|
||||
0 0.755 1.754 0.701 2.111 0.256 1.243 0.057 -1.502 2.173 0.565 -0.034 -1.078 1.107 0.529 1.696 -1.090 0.000 0.665 0.292 0.107 0.000 0.870 0.780 0.990 2.775 0.465 1.876 1.758
|
||||
1 0.593 -0.762 1.743 0.908 0.442 0.773 -1.357 -0.768 2.173 0.432 1.421 1.236 0.000 0.579 0.291 -0.403 0.000 0.966 -0.309 1.016 3.102 0.893 0.743 0.989 0.857 1.030 0.943 0.854
|
||||
1 0.891 -1.151 -1.269 0.504 -0.622 0.893 -0.549 0.700 0.000 0.828 -0.825 0.154 2.215 1.083 0.632 -1.141 0.000 1.059 -0.557 1.526 3.102 2.117 1.281 0.987 0.819 0.802 0.917 0.828
|
||||
1 2.358 -0.248 0.080 0.747 -0.975 1.019 1.374 1.363 0.000 0.935 0.127 -1.707 2.215 0.312 -0.827 0.017 0.000 0.737 1.059 -0.327 0.000 0.716 0.828 1.495 0.953 0.704 0.880 0.745
|
||||
0 0.660 -0.017 -1.138 0.453 1.002 0.645 0.518 0.703 2.173 0.751 0.705 -0.592 2.215 0.744 -0.909 -1.596 0.000 0.410 -1.135 0.481 0.000 0.592 0.922 0.989 0.897 0.948 0.777 0.701
|
||||
1 0.718 0.518 0.225 1.710 -0.022 1.888 -0.424 1.092 0.000 4.134 0.185 -1.366 0.000 1.415 1.293 0.242 2.548 2.351 0.264 -0.057 3.102 0.830 1.630 0.976 1.215 0.890 1.422 1.215
|
||||
1 1.160 0.203 0.941 0.594 0.212 0.636 -0.556 0.679 2.173 1.089 -0.481 -1.008 1.107 1.245 -0.056 -1.357 0.000 0.587 1.007 0.056 0.000 1.106 0.901 0.987 0.786 1.224 0.914 0.837
|
||||
1 0.697 0.542 0.619 0.985 1.481 0.745 0.415 1.644 2.173 0.903 0.495 -0.958 2.215 1.165 1.195 0.346 0.000 1.067 -0.881 -0.264 0.000 0.830 1.025 0.987 0.690 0.863 0.894 0.867
|
||||
0 1.430 0.190 -0.700 0.246 0.518 1.302 0.660 -0.247 2.173 1.185 -0.539 1.504 0.000 1.976 -0.401 1.079 0.000 0.855 -0.958 -1.110 3.102 0.886 0.953 0.993 0.889 1.400 1.376 1.119
|
||||
1 1.122 -0.795 0.202 0.397 -1.553 0.597 -1.459 -0.734 2.173 0.522 1.044 1.027 2.215 0.783 -1.243 1.701 0.000 0.371 1.737 0.199 0.000 1.719 1.176 0.988 0.723 1.583 1.063 0.914
|
||||
0 1.153 0.526 1.236 0.266 0.001 1.139 -1.236 -0.585 2.173 1.337 -0.215 -1.356 2.215 1.780 1.129 0.902 0.000 1.608 -0.391 -0.161 0.000 1.441 1.633 0.990 1.838 1.516 1.635 1.373
|
||||
1 0.760 1.012 0.758 0.937 0.051 0.941 0.687 -1.247 2.173 1.288 -0.743 0.822 0.000 1.552 1.782 -1.533 0.000 0.767 1.349 0.168 0.000 0.716 0.862 0.988 0.595 0.359 0.697 0.623
|
||||
1 1.756 -1.469 1.395 1.345 -1.595 0.817 0.017 -0.741 2.173 0.483 -0.008 0.293 0.000 1.768 -0.663 0.438 1.274 1.202 -1.387 -0.222 0.000 1.022 1.058 0.992 1.407 1.427 1.356 1.133
|
||||
0 0.397 0.582 -0.758 1.260 -1.735 0.889 -0.515 1.139 2.173 0.973 1.616 0.460 0.000 1.308 1.001 -0.709 2.548 0.858 0.995 -0.231 0.000 0.749 0.888 0.979 1.487 1.804 1.208 1.079
|
||||
0 0.515 -0.984 0.425 1.114 -0.439 1.999 0.818 1.561 0.000 1.407 0.009 -0.380 0.000 1.332 0.230 0.397 0.000 1.356 -0.616 -1.057 3.102 0.978 1.017 0.990 1.118 0.862 0.835 0.919
|
||||
1 1.368 -0.921 -0.866 0.842 -0.598 0.456 -1.176 1.219 1.087 0.419 -1.974 -0.819 0.000 0.791 -1.640 0.881 0.000 1.295 -0.782 0.442 3.102 0.945 0.761 0.974 0.915 0.535 0.733 0.651
|
||||
0 2.276 0.134 0.399 2.525 0.376 1.111 -1.078 -1.571 0.000 0.657 2.215 -0.900 0.000 1.183 -0.662 -0.508 2.548 1.436 -0.517 0.960 3.102 0.569 0.931 0.993 1.170 0.967 0.879 1.207
|
||||
0 0.849 0.907 0.124 0.652 1.585 0.715 0.355 -1.200 0.000 0.599 -0.892 1.301 0.000 1.106 1.151 0.582 0.000 1.895 -0.279 -0.568 3.102 0.881 0.945 0.998 0.559 0.649 0.638 0.660
|
||||
1 2.105 0.248 -0.797 0.530 0.206 1.957 -2.175 0.797 0.000 1.193 0.637 -1.646 2.215 0.881 1.111 -1.046 0.000 0.872 -0.185 1.085 1.551 0.986 1.343 1.151 1.069 0.714 2.063 1.951
|
||||
1 1.838 1.060 1.637 1.017 1.370 0.913 0.461 -0.609 1.087 0.766 -0.461 0.303 2.215 0.724 -0.061 0.886 0.000 0.941 1.123 -0.745 0.000 0.858 0.847 0.979 1.313 1.083 1.094 0.910
|
||||
0 0.364 1.274 1.066 1.570 -0.394 0.485 0.012 -1.716 0.000 0.317 -1.233 0.534 2.215 0.548 -2.165 0.762 0.000 0.729 0.169 -0.318 3.102 0.892 0.944 1.013 0.594 0.461 0.688 0.715
|
||||
1 0.503 1.343 -0.031 1.134 -1.204 0.590 -0.309 0.174 2.173 0.408 2.372 -0.628 0.000 1.850 0.400 1.147 2.548 0.664 -0.458 -0.885 0.000 1.445 1.283 0.989 1.280 1.118 1.127 1.026
|
||||
0 1.873 0.258 0.103 2.491 0.530 1.678 0.644 -1.738 2.173 1.432 0.848 -1.340 0.000 0.621 1.323 -1.316 0.000 0.628 0.789 -0.206 1.551 0.426 0.802 1.125 0.688 1.079 1.338 1.239
|
||||
1 0.826 -0.732 1.587 0.582 -1.236 0.495 0.757 -0.741 2.173 0.940 1.474 0.354 2.215 0.474 1.055 -1.657 0.000 0.415 1.758 0.841 0.000 0.451 0.578 0.984 0.757 0.922 0.860 0.696
|
||||
0 0.935 -1.614 -0.597 0.299 1.223 0.707 -0.853 -1.026 0.000 0.751 0.007 -1.691 0.000 1.062 -0.125 0.976 2.548 0.877 1.275 0.646 0.000 0.962 1.074 0.980 0.608 0.726 0.741 0.662
|
||||
1 0.643 0.542 -1.285 0.474 -0.366 0.667 -0.446 1.195 2.173 1.076 0.145 -0.126 0.000 0.970 -0.661 0.394 1.274 1.218 -0.184 -1.722 0.000 1.331 1.019 0.985 1.192 0.677 0.973 0.910
|
||||
0 0.713 0.164 1.080 1.427 -0.460 0.960 -0.152 -0.940 2.173 1.427 -0.901 1.036 1.107 0.440 -1.269 -0.194 0.000 0.452 1.932 -0.532 0.000 1.542 1.210 1.374 1.319 1.818 1.220 1.050
|
||||
0 0.876 -0.463 -1.224 2.458 -1.689 1.007 -0.752 0.398 0.000 2.456 -1.285 -0.152 1.107 1.641 1.838 1.717 0.000 0.458 0.194 0.488 3.102 4.848 2.463 0.986 1.981 0.974 2.642 2.258
|
||||
1 0.384 -0.275 0.387 1.403 -0.994 0.620 -1.529 1.685 0.000 1.091 -1.644 1.078 0.000 0.781 -1.311 0.326 2.548 1.228 -0.728 -0.633 1.551 0.920 0.854 0.987 0.646 0.609 0.740 0.884
|
||||
0 0.318 -1.818 -1.008 0.977 1.268 0.457 2.451 -1.522 0.000 0.881 1.351 0.461 2.215 0.929 0.239 -0.380 2.548 0.382 -0.613 1.330 0.000 1.563 1.193 0.994 0.829 0.874 0.901 1.026
|
||||
1 0.612 -1.120 1.098 0.402 -0.480 0.818 0.188 1.511 0.000 0.800 -0.253 0.977 0.000 1.175 0.271 -1.289 1.274 2.531 0.226 -0.409 3.102 0.889 0.947 0.979 1.486 0.940 1.152 1.119
|
||||
1 0.587 -0.737 -0.228 0.970 1.119 0.823 0.184 1.594 0.000 1.104 0.301 -0.818 2.215 0.819 0.712 -0.560 0.000 2.240 -0.419 0.340 3.102 1.445 1.103 0.988 0.715 1.363 1.019 0.926
|
||||
0 1.030 -0.694 -1.638 0.893 -1.074 1.160 -0.766 0.485 0.000 1.632 -0.698 -1.142 2.215 1.050 -1.092 0.952 0.000 1.475 0.286 0.125 3.102 0.914 1.075 0.982 0.732 1.493 1.219 1.079
|
||||
1 2.142 0.617 1.517 0.387 -0.862 0.345 1.203 -1.014 2.173 0.609 1.092 0.275 0.000 1.331 0.582 -0.183 2.548 0.557 1.540 -1.642 0.000 0.801 0.737 1.060 0.715 0.626 0.749 0.674
|
||||
0 1.076 0.240 -0.246 0.871 -1.241 0.496 0.282 0.746 2.173 1.095 -0.648 1.100 2.215 0.446 -1.756 0.764 0.000 0.434 0.788 -0.991 0.000 1.079 0.868 1.047 0.818 0.634 0.795 0.733
|
||||
0 1.400 0.901 -1.617 0.625 -0.163 0.661 -0.411 -1.616 2.173 0.685 0.524 0.425 0.000 0.881 -0.766 0.312 0.000 0.979 0.255 -0.667 3.102 0.898 1.105 1.253 0.730 0.716 0.738 0.795
|
||||
0 3.302 1.132 1.051 0.658 0.768 1.308 0.251 -0.374 1.087 1.673 0.015 -0.898 0.000 0.688 -0.535 1.363 1.274 0.871 1.325 -1.583 0.000 1.646 1.249 0.995 1.919 1.288 1.330 1.329
|
||||
0 1.757 0.202 0.750 0.767 -0.362 0.932 -1.033 -1.366 0.000 1.529 -1.012 -0.771 0.000 1.161 -0.287 0.059 0.000 2.185 1.147 1.099 3.102 0.795 0.529 1.354 1.144 1.491 1.319 1.161
|
||||
0 1.290 0.905 -1.711 1.017 -0.695 1.008 -1.038 0.693 2.173 1.202 -0.595 0.187 0.000 1.011 0.139 -1.607 0.000 0.789 -0.613 -1.041 3.102 1.304 0.895 1.259 1.866 0.955 1.211 1.200
|
||||
1 1.125 -0.004 1.694 0.373 0.329 0.978 0.640 -0.391 0.000 1.122 -0.376 1.521 2.215 0.432 2.413 -1.259 0.000 0.969 0.730 0.512 3.102 0.716 0.773 0.991 0.624 0.977 0.981 0.875
|
||||
0 1.081 0.861 1.252 1.621 1.474 1.293 0.600 0.630 0.000 1.991 -0.090 -0.675 2.215 0.861 1.105 -0.201 0.000 1.135 2.489 -1.659 0.000 1.089 0.657 0.991 2.179 0.412 1.334 1.071
|
||||
1 0.652 -0.294 1.241 1.034 0.490 1.033 0.551 -0.963 2.173 0.661 1.031 -1.654 2.215 1.376 -0.018 0.843 0.000 0.943 -0.329 -0.269 0.000 1.085 1.067 0.991 1.504 0.773 1.135 0.993
|
||||
1 1.408 -1.028 -1.018 0.252 -0.242 0.465 -0.364 -0.200 0.000 1.466 0.669 0.739 1.107 1.031 0.415 -1.468 2.548 0.457 -1.091 -1.722 0.000 0.771 0.811 0.979 1.459 1.204 1.041 0.866
|
||||
1 0.781 -1.143 -0.659 0.961 1.266 1.183 -0.686 0.119 2.173 1.126 -0.064 1.447 0.000 0.730 1.430 -1.535 0.000 1.601 0.513 1.658 0.000 0.871 1.345 1.184 1.058 0.620 1.107 0.978
|
||||
1 1.300 -0.616 1.032 0.751 -0.731 0.961 -0.716 1.592 0.000 2.079 -1.063 -0.271 2.215 0.475 0.518 1.695 1.274 0.395 -2.204 0.349 0.000 1.350 0.983 1.369 1.265 1.428 1.135 0.982
|
||||
1 0.833 0.809 1.657 1.637 1.019 0.705 1.077 -0.968 2.173 1.261 0.114 -0.298 1.107 1.032 0.017 0.236 0.000 0.640 -0.026 -1.598 0.000 0.894 0.982 0.981 1.250 1.054 1.018 0.853
|
||||
1 1.686 -1.090 -0.301 0.890 0.557 1.304 -0.284 -1.393 2.173 0.388 2.118 0.513 0.000 0.514 -0.015 0.891 0.000 0.460 0.547 0.627 3.102 0.942 0.524 1.186 1.528 0.889 1.015 1.122
|
||||
1 0.551 0.911 0.879 0.379 -0.796 1.154 -0.808 -0.966 0.000 1.168 -0.513 0.355 2.215 0.646 -1.309 0.773 0.000 0.544 -0.283 1.301 3.102 0.847 0.705 0.990 0.772 0.546 0.790 0.719
|
||||
1 1.597 0.793 -1.119 0.691 -1.455 0.370 0.337 1.354 0.000 0.646 -1.005 0.732 2.215 1.019 0.040 0.209 0.000 0.545 0.958 0.239 3.102 0.962 0.793 0.994 0.719 0.745 0.812 0.739
|
||||
0 1.033 -1.193 -0.452 0.247 0.970 0.503 -1.424 1.362 0.000 1.062 -0.416 -1.156 2.215 0.935 -0.023 0.555 2.548 0.410 -1.766 0.379 0.000 0.590 0.953 0.991 0.717 1.081 0.763 0.690
|
||||
1 0.859 -1.004 1.521 0.781 -0.993 0.677 0.643 -0.338 2.173 0.486 0.409 1.283 0.000 0.679 0.110 0.285 0.000 0.715 -0.735 -0.157 1.551 0.702 0.773 0.984 0.627 0.633 0.694 0.643
|
||||
0 0.612 -1.127 1.074 1.225 -0.426 0.927 -2.141 -0.473 0.000 1.290 -0.927 -1.085 2.215 1.183 1.981 -1.687 0.000 2.176 0.406 -1.581 0.000 0.945 0.651 1.170 0.895 1.604 1.179 1.142
|
||||
1 0.535 0.321 -1.095 0.281 -0.960 0.876 -0.709 -0.076 0.000 1.563 -0.666 1.536 2.215 0.773 -0.321 0.435 0.000 0.682 -0.801 -0.952 3.102 0.711 0.667 0.985 0.888 0.741 0.872 0.758
|
||||
1 0.745 1.586 1.578 0.863 -1.423 0.530 1.714 1.085 0.000 1.174 0.679 1.015 0.000 1.158 0.609 -1.186 2.548 1.851 0.832 -0.248 3.102 0.910 1.164 0.983 0.947 0.858 0.928 0.823
|
||||
0 0.677 -1.014 -1.648 1.455 1.461 0.596 -2.358 0.517 0.000 0.800 0.849 -0.743 2.215 1.024 -0.282 -1.004 0.000 1.846 -0.977 0.378 3.102 2.210 1.423 0.982 1.074 1.623 1.417 1.258
|
||||
1 0.815 -1.263 0.057 1.018 -0.208 0.339 -0.347 -1.646 2.173 1.223 0.600 -1.658 2.215 1.435 0.042 0.926 0.000 0.777 1.698 -0.698 0.000 1.022 1.058 1.000 0.784 0.477 0.886 0.836
|
||||
0 3.512 -1.094 -0.220 0.338 -0.328 1.962 -1.099 1.544 1.087 1.461 -1.305 -0.922 2.215 1.219 -1.289 0.400 0.000 0.731 0.155 1.249 0.000 1.173 1.366 0.993 2.259 2.000 1.626 1.349
|
||||
0 0.904 1.248 0.325 0.317 -1.624 0.685 -0.538 1.665 2.173 0.685 -2.145 -1.106 0.000 0.632 -1.460 1.017 0.000 1.085 -0.182 0.162 3.102 0.885 0.801 0.989 0.930 0.904 1.012 0.961
|
||||
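For reference, the rows above use LightGBM's plain-text data layout: one example per line, label first. A minimal sketch, assuming a local copy of binary0.train (the file name is taken from the notebook below), of loading such a file for inspection:

# Minimal sketch: load a LightGBM text data file (0/1 label + 28 features per row).
# Assumes binary0.train is in the working directory.
import numpy as np

data = np.loadtxt('binary0.train')
y, X = data[:, 0], data[:, 1:]   # first column is the label
print(X.shape, y.mean())         # (n_rows, 28) and the positive-class rate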
@@ -1,270 +0,0 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Copyright (c) Microsoft Corporation. All rights reserved. \n",
    "Licensed under the MIT License."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    ""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Use LightGBM Estimator in Azure Machine Learning\n",
    "In this notebook we demonstrate how to run a training job using the LightGBM estimator. [LightGBM](https://lightgbm.readthedocs.io/en/latest/) is a gradient boosting framework that uses tree-based learning algorithms."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Prerequisites\n",
    "This notebook uses the azureml-contrib-gbdt package. If you don't already have it, install it by uncommenting the cell below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#!pip install azureml-contrib-gbdt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from azureml.core import Workspace, Run, Experiment\n",
    "import shutil, os\n",
    "from azureml.widgets import RunDetails\n",
    "from azureml.contrib.gbdt import LightGBM\n",
    "from azureml.train.dnn import Mpi\n",
    "from azureml.core.compute import AmlCompute, ComputeTarget\n",
    "from azureml.core.compute_target import ComputeTargetException"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "If you are using an Azure ML Compute Instance, you are all set. Otherwise, go through the [configuration.ipynb](../../../configuration.ipynb) notebook first to install the Azure Machine Learning Python SDK and create an Azure ML Workspace."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Set up machine learning resources"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ws = Workspace.from_config()\n",
    "\n",
    "print('Workspace name: ' + ws.name,\n",
    "      'Azure region: ' + ws.location,\n",
    "      'Subscription id: ' + ws.subscription_id,\n",
    "      'Resource group: ' + ws.resource_group, sep='\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cluster_vm_size = \"STANDARD_DS14_V2\"\n",
    "cluster_min_nodes = 0\n",
    "cluster_max_nodes = 20\n",
    "cpu_cluster_name = 'TrainingCompute2'\n",
    "\n",
    "try:\n",
    "    cpu_cluster = AmlCompute(ws, cpu_cluster_name)\n",
    "    if cpu_cluster and type(cpu_cluster) is AmlCompute:\n",
    "        print('found compute target: ' + cpu_cluster_name)\n",
    "except ComputeTargetException:\n",
    "    print('creating a new compute target...')\n",
    "    provisioning_config = AmlCompute.provisioning_configuration(vm_size=cluster_vm_size,\n",
    "                                                                vm_priority='lowpriority',\n",
    "                                                                min_nodes=cluster_min_nodes,\n",
    "                                                                max_nodes=cluster_max_nodes)\n",
    "    cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, provisioning_config)\n",
    "\n",
    "    # can poll for a minimum number of nodes and for a specific timeout;\n",
    "    # if no min node count is provided it will use the scale settings for the cluster\n",
    "    cpu_cluster.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
    "\n",
    "    # for a more detailed view of the current Azure Machine Learning Compute status, use get_status()\n",
    "    print(cpu_cluster.get_status().serialize())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "From this point, you can either upload the training data files directly or use a Datastore for training data storage.\n",
    "## Upload training files from local"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "scripts_folder = \"scripts_folder\"\n",
    "if not os.path.isdir(scripts_folder):\n",
    "    os.mkdir(scripts_folder)\n",
    "shutil.copy('./train.conf', os.path.join(scripts_folder, 'train.conf'))\n",
    "shutil.copy('./binary0.train', os.path.join(scripts_folder, 'binary0.train'))\n",
    "shutil.copy('./binary1.train', os.path.join(scripts_folder, 'binary1.train'))\n",
    "shutil.copy('./binary0.test', os.path.join(scripts_folder, 'binary0.test'))\n",
    "shutil.copy('./binary1.test', os.path.join(scripts_folder, 'binary1.test'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "training_data_list = [\"binary0.train\", \"binary1.train\"]\n",
    "validation_data_list = [\"binary0.test\", \"binary1.test\"]\n",
    "lgbm = LightGBM(source_directory=scripts_folder,\n",
    "                compute_target=cpu_cluster,\n",
    "                distributed_training=Mpi(),\n",
    "                node_count=2,\n",
    "                lightgbm_config='train.conf',\n",
    "                data=training_data_list,\n",
    "                valid=validation_data_list)\n",
    "experiment_name = 'lightgbm-estimator-test'\n",
    "experiment = Experiment(ws, name=experiment_name)\n",
    "run = experiment.submit(lgbm, tags={\"test public docker image\": None})\n",
    "RunDetails(run).show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "run.wait_for_completion(show_output=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Use data reference"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from azureml.core.datastore import Datastore\n",
    "from azureml.data.data_reference import DataReference\n",
    "datastore = ws.get_default_datastore()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "datastore.upload(src_dir='.',\n",
    "                 target_path='.',\n",
    "                 show_progress=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "training_data_list = [\"binary0.train\", \"binary1.train\"]\n",
    "validation_data_list = [\"binary0.test\", \"binary1.test\"]\n",
    "lgbm = LightGBM(source_directory='.',\n",
    "                compute_target=cpu_cluster,\n",
    "                distributed_training=Mpi(),\n",
    "                node_count=2,\n",
    "                inputs=[datastore.as_mount()],\n",
    "                lightgbm_config='train.conf',\n",
    "                data=training_data_list,\n",
    "                valid=validation_data_list)\n",
    "experiment_name = 'lightgbm-estimator-test'\n",
    "experiment = Experiment(ws, name=experiment_name)\n",
    "run = experiment.submit(lgbm, tags={\"use datastore.as_mount()\": None})\n",
    "RunDetails(run).show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "run.wait_for_completion(show_output=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# uncomment and run if the compute resources are no longer needed\n",
    "# cpu_cluster.delete()"
   ]
  }
 ],
 "metadata": {
  "authors": [
   {
    "name": "jingywa"
   }
  ],
  "kernelspec": {
   "display_name": "Python 3.6",
   "language": "python",
   "name": "python36"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
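Once run.wait_for_completion returns, the logged metrics and stored artifacts can be pulled back into the local session. A minimal sketch, assuming the standard azureml-core Run API; LightGBM_model.txt is the output_model name set in train.conf below, but the exact artifact path is an assumption, so check it against the file listing first:

# Minimal sketch: inspect the finished run (artifact path is an assumption).
print(run.get_metrics())        # metrics logged during training
print(run.get_file_names())     # artifacts stored with the run
# Download the trained model named by output_model in train.conf; verify
# the 'outputs/' prefix in the listing above before uncommenting.
# run.download_file('outputs/LightGBM_model.txt', 'LightGBM_model.txt')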
@@ -1,7 +0,0 @@
name: lightgbm-example
dependencies:
- pip:
  - azureml-sdk
  - azureml-contrib-gbdt
  - azureml-widgets
  - azureml-core
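This environment file can be consumed with conda, e.g. `conda env create -f <file>.yml` followed by `conda activate lightgbm-example`; the file name itself is not shown in this diff, so substitute whatever the file is saved as.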
@@ -1,111 +0,0 @@
# task type, supports train and predict
task = train

# boosting type, supports gbdt for now, alias: boosting, boost
boosting_type = gbdt

# application type, supports the following applications:
# regression , regression task
# binary , binary classification task
# lambdarank , lambdarank task
# alias: application, app
objective = binary

# eval metrics, supports multiple metrics delimited by ',', supports the following metrics:
# l1
# l2 , default metric for regression
# ndcg , default metric for lambdarank
# auc
# binary_logloss , default metric for binary
# binary_error
metric = binary_logloss,auc

# frequency of metric output
metric_freq = 1

# true if metrics should also be output for the training data, alias: training_metric, train_metric
is_training_metric = true

# number of bins for feature bucketing, 255 is a recommended setting: it saves memory and also gives good accuracy
max_bin = 255

# training data
# if a weight file exists, it should be named "binary.train.weight"
# alias: train_data, train
data = binary.train

# validation data, supports multiple validation datasets, separated by ','
# if a weight file exists, it should be named "binary.test.weight"
# alias: valid, test, test_data
valid_data = binary.test

# number of trees (iterations), alias: num_tree, num_iteration, num_iterations, num_round, num_rounds
num_trees = 100

# shrinkage rate, alias: shrinkage_rate
learning_rate = 0.1

# number of leaves for one tree, alias: num_leaf
num_leaves = 63

# type of tree learner, supports the following types:
# serial , single machine version
# feature , use feature parallel to train
# data , use data parallel to train
# voting , use voting based parallel to train
# alias: tree
tree_learner = feature

# number of threads for multi-threading. One thread will use one CPU; the default is set to the number of CPUs.
# num_threads = 8

# feature sub-sampling: randomly select 80% of the features to train on at each iteration
# alias: sub_feature
feature_fraction = 0.8

# bagging (data sub-sampling): perform bagging every 5 iterations
bagging_freq = 5

# bagging fraction: randomly select 80% of the data for each bag
# alias: sub_row
bagging_fraction = 0.8

# minimal number of data points in one leaf, use this to deal with over-fitting
# alias: min_data_per_leaf, min_data
min_data_in_leaf = 50

# minimal sum of Hessians in one leaf, use this to deal with over-fitting
min_sum_hessian_in_leaf = 5.0

# saves memory and gives faster speed for sparse features, alias: is_sparse
is_enable_sparse = true

# set this to true when the data is bigger than memory; otherwise, false gives faster speed
# alias: two_round_loading, two_round
use_two_round_loading = false

# true to save the data to a binary file; the application will auto-load the binary file next time
# alias: is_save_binary, save_binary
is_save_binary_file = false

# output model file
output_model = LightGBM_model.txt

# supports continued training from a trained gbdt model
# input_model= trained_model.txt

# output prediction file for the predict task
# output_result= prediction.txt

# supports continued training from an initial score file
# input_init_score= init_score.txt


# number of machines in parallel training, alias: num_machine
num_machines = 2

# local listening port in parallel training, alias: local_port
local_listen_port = 12400

# machine list file for parallel training, alias: mlist
machine_list_file = mlist.txt
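The configuration above is plain `key = value` lines with `#` comments, so it is easy to sanity-check before submitting a run. A minimal sketch of a hypothetical helper (not part of the notebook) that loads the file into a dict and confirms num_machines matches the estimator's node_count of 2:

# Minimal sketch: parse the key = value format of train.conf (hypothetical helper).
def load_lightgbm_conf(path):
    params = {}
    with open(path) as f:
        for raw in f:
            line = raw.split('#', 1)[0].strip()  # drop comments and blank lines
            if not line:
                continue
            key, _, value = line.partition('=')
            params[key.strip()] = value.strip()
    return params

params = load_lightgbm_conf('train.conf')
# num_machines should generally match the node_count passed to the estimator.
assert params['num_machines'] == '2'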
@@ -4,12 +4,11 @@ Learn how to use Azure Machine Learning services for experimentation and model m

As a pre-requisite, run the [configuration Notebook](../configuration.ipynb) first to set up your Azure ML Workspace. Then run the notebooks in the following recommended order.

* [train-within-notebook](./training/train-within-notebook): Train a model hile tracking run history, and learn how to deploy the model as web service to Azure Container Instance.
* [train-within-notebook](./training/train-within-notebook): Train a model while tracking run history, and learn how to deploy the model as a web service to Azure Container Instance.
* [train-on-local](./training/train-on-local): Learn how to submit a run to your local computer and use an Azure ML managed run configuration.
* [train-on-amlcompute](./training/train-on-amlcompute): Use a 1-n node Azure ML managed compute cluster for remote runs on Azure CPU or GPU infrastructure.
* [train-on-remote-vm](./training/train-on-remote-vm): Use a Data Science Virtual Machine as a target for remote runs.
* [logging-api](./track-and-monitor-experiments/logging-api): Learn about the details of logging metrics to run history.
* [production-deploy-to-aks](./deployment/production-deploy-to-aks): Deploy a model to production at scale on Azure Kubernetes Service.
* [enable-app-insights-in-production-service](./deployment/enable-app-insights-in-production-service): Learn how to use App Insights with a production web service.

Find quickstarts, end-to-end tutorials, and how-tos on the [official documentation site for Azure Machine Learning service](https://docs.microsoft.com/en-us/azure/machine-learning/service/).

@@ -1,8 +1,8 @@
# Table of Contents
1. [Automated ML Introduction](#introduction)
1. [Setup using Azure Notebooks](#jupyter)
1. [Setup using Azure Databricks](#databricks)
1. [Setup using Compute Instances](#jupyter)
1. [Setup using a Local Conda environment](#localconda)
1. [Setup using Azure Databricks](#databricks)
1. [Automated ML SDK Sample Notebooks](#samples)
1. [Documentation](#documentation)
1. [Running using python command](#pythoncommand)
@@ -21,13 +21,13 @@ Below are the three execution environments supported by automated ML.


<a name="jupyter"></a>
## Setup using Notebook VMs - Jupyter based notebooks from a Azure VM
## Setup using Compute Instances - Jupyter based notebooks from an Azure Virtual Machine

1. Open the [ML Azure portal](https://ml.azure.com)
1. Select Compute
1. Select Notebook VMs
1. Select Compute Instances
1. Click New
1. Type a name for the Vm and select a VM type
1. Type a Compute Name, select a Virtual Machine type, and select a Virtual Machine size
1. Click Create

<a name="localconda"></a>
@@ -97,68 +97,96 @@ jupyter notebook
|
||||
<a name="databricks"></a>
|
||||
## Setup using Azure Databricks
|
||||
|
||||
**NOTE**: Please create your Azure Databricks cluster as v6.0 (high concurrency preferred) with **Python 3** (dropdown).
|
||||
**NOTE**: Please create your Azure Databricks cluster as v7.1 (high concurrency preferred) with **Python 3** (dropdown).
|
||||
**NOTE**: You should at least have contributor access to your Azure subcription to run the notebook.
|
||||
- Please remove the previous SDK version if there is any and install the latest SDK by installing **azureml-sdk[automl]** as a PyPi library in Azure Databricks workspace.
|
||||
- You can find the detail Readme instructions at [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks).
|
||||
- Download the sample notebook automl-databricks-local-01.ipynb from [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks) and import into the Azure databricks workspace.
|
||||
- You can find the detail Readme instructions at [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks/automl).
|
||||
- Download the sample notebook automl-databricks-local-01.ipynb from [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks/automl) and import into the Azure databricks workspace.
|
||||
- Attach the notebook to the cluster.
|
||||
|
||||
<a name="samples"></a>
|
||||
# Automated ML SDK Sample Notebooks
|
||||
|
||||
- [auto-ml-classification-credit-card-fraud.ipynb](classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb)
|
||||
- Dataset: Kaggle's [credit card fraud detection dataset](https://www.kaggle.com/mlg-ulb/creditcardfraud)
|
||||
- Simple example of using automated ML for classification to fraudulent credit card transactions
|
||||
- Uses azure compute for training
|
||||
## Classification
|
||||
- **Classify Credit Card Fraud**
|
||||
- Dataset: [Kaggle's credit card fraud detection dataset](https://www.kaggle.com/mlg-ulb/creditcardfraud)
|
||||
- **[Jupyter Notebook (remote run)](classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb)**
|
||||
- run the experiment remotely on AML Compute cluster
|
||||
- test the performance of the best model in the local environment
|
||||
- **[Jupyter Notebook (local run)](local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb)**
|
||||
- run experiment in the local environment
|
||||
- use Mimic Explainer for computing feature importance
|
||||
- deploy the best model along with the explainer to an Azure Kubernetes (AKS) cluster, which will compute the raw and engineered feature importances at inference time
|
||||
- **Predict Term Deposit Subscriptions in a Bank**
|
||||
- Dataset: [UCI's bank marketing dataset](https://www.kaggle.com/janiobachmann/bank-marketing-dataset)
|
||||
- **[Jupyter Notebook](classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb)**
|
||||
- run experiment remotely on AML Compute cluster to generate ONNX compatible models
|
||||
- view the featurization steps that were applied during training
|
||||
- view feature importance for the best model
|
||||
- download the best model in ONNX format and use it for inferencing using ONNXRuntime
|
||||
- deploy the best model in PKL format to Azure Container Instance (ACI)
|
||||
- **Predict Newsgroup based on Text from News Article**
|
||||
- Dataset: [20 newsgroups text dataset](https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html)
|
||||
- **[Jupyter Notebook](classification-text-dnn/auto-ml-classification-text-dnn.ipynb)**
|
||||
- AutoML highlights here include using deep neural networks (DNNs) to create embedded features from text data
|
||||
- AutoML will use Bidirectional Encoder Representations from Transformers (BERT) when a GPU compute is used
|
||||
- a Bidirectional Long Short-Term Memory network (BiLSTM) will be utilized when a CPU compute is used, thereby optimizing the choice of DNN (a minimal classification configuration sketch follows this list)
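All of the classification notebooks above build an `AutoMLConfig` and submit it to an experiment. The following is a minimal, hedged sketch of that pattern, not the exact configuration of any one notebook; it assumes a workspace config file, an existing AML Compute cluster named `cpu-cluster`, and a `TabularDataset` called `train_data` whose label column is `"Class"`.

```python
# Sketch: configure and submit a remote AutoML classification experiment.
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.train.automl import AutoMLConfig

ws = Workspace.from_config()                         # reads config.json
compute_target = ws.compute_targets["cpu-cluster"]   # assumed existing cluster

automl_config = AutoMLConfig(
    task="classification",
    primary_metric="AUC_weighted",
    training_data=train_data,        # assumed TabularDataset: features + label
    label_column_name="Class",       # assumed label column name
    n_cross_validations=3,
    compute_target=compute_target,
    enable_early_stopping=True,
)

experiment = Experiment(ws, "automl-classification-sample")
remote_run = experiment.submit(automl_config, show_output=False)
```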
|
||||
|
||||
- [auto-ml-regression.ipynb](regression/auto-ml-regression.ipynb)
|
||||
## Regression
|
||||
- **Predict Performance of Hardware Parts**
|
||||
- Dataset: Hardware Performance Dataset
|
||||
- Simple example of using automated ML for regression
|
||||
- Uses Azure compute for training
|
||||
- **[Jupyter Notebook](regression/auto-ml-regression.ipynb)**
|
||||
- run the experiment remotely on AML Compute cluster
|
||||
- get best trained model for a different metric than the one the experiment was optimized for
|
||||
- test the performance of the best model in the local environment
|
||||
- **[Jupyter Notebook (advanced)](regression/auto-ml-regression.ipynb)**
|
||||
- run the experiment remotely on AML Compute cluster
|
||||
- customize featurization: override column purpose within the dataset, configure transformer parameters
|
||||
- get best trained model for a different metric than the one the experiment was optimized for (see the metric-selection sketch after this list)
|
||||
- run a model explanation experiment on the remote cluster
|
||||
- deploy the model along with the explainer and run online inferencing
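As noted above, the optimized metric is not the only one you can use to pick a model. A hedged sketch, assuming `remote_run` is a completed `AutoMLRun` from a regression experiment:

```python
# Sketch: retrieve the best child run by a metric other than the one
# the experiment optimized for (here r2_score), then inspect it.
best_by_r2 = remote_run.get_best_child(metric="r2_score")
print(best_by_r2.id)
print(best_by_r2.get_metrics().get("r2_score"))
```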
|
||||
|
||||
- [auto-ml-regression-explanation-featurization.ipynb](regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb)
|
||||
- Dataset: Hardware Performance Dataset
|
||||
- Shows featurization and explanation
|
||||
- Uses Azure compute for training
|
||||
|
||||
- [auto-ml-forecasting-energy-demand.ipynb](forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb)
|
||||
- Dataset: [NYC energy demand data](forecasting-a/nyc_energy.csv)
|
||||
- Example of using automated ML for training a forecasting model
|
||||
|
||||
- [auto-ml-classification-credit-card-fraud-local.ipynb](local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb)
|
||||
- Dataset: Kaggle's [credit card fraud detection dataset](https://www.kaggle.com/mlg-ulb/creditcardfraud)
|
||||
- Simple example of using automated ML for classifying fraudulent credit card transactions
|
||||
- Uses local compute for training
|
||||
|
||||
- [auto-ml-classification-bank-marketing-all-features.ipynb](classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb)
|
||||
- Dataset: UCI's [bank marketing dataset](https://www.kaggle.com/janiobachmann/bank-marketing-dataset)
|
||||
- Simple example of using automated ML for classification to predict term deposit subscriptions for a bank
|
||||
- Uses Azure compute for training
|
||||
|
||||
- [auto-ml-forecasting-orange-juice-sales.ipynb](forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb)
|
||||
- Dataset: [Dominick's grocery sales of orange juice](forecasting-b/dominicks_OJ.csv)
|
||||
- Example of training an automated ML forecasting model on multiple time-series
|
||||
|
||||
- [auto-ml-forecasting-bike-share.ipynb](forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb)
|
||||
- Dataset: bike-sharing demand data
|
||||
- Example of training an automated ML forecasting model on multiple time-series
|
||||
|
||||
- [auto-ml-forecasting-function.ipynb](forecasting-high-frequency/auto-ml-forecasting-function.ipynb)
|
||||
- Example of training an automated ML forecasting model on multiple time-series
|
||||
|
||||
- [auto-ml-forecasting-beer-remote.ipynb](forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb)
|
||||
- Example of training an automated ML forecasting model on multiple time-series
|
||||
- Beer Production Forecasting
|
||||
|
||||
- [auto-ml-continuous-retraining.ipynb](continuous-retraining/auto-ml-continuous-retraining.ipynb)
|
||||
- Continuous retraining using Pipelines and Time-Series TabularDataset
|
||||
|
||||
- [auto-ml-classification-text-dnn.ipynb](classification-text-dnn/auto-ml-classification-text-dnn.ipynb)
|
||||
- Classification with text data using deep learning in AutoML
|
||||
- AutoML highlights here include using deep neural networks (DNNs) to create embedded features from text data.
|
||||
- Depending on the compute cluster the user provides, AutoML tries out Bidirectional Encoder Representations from Transformers (BERT) when a GPU compute is used.
|
||||
- a Bidirectional Long Short-Term Memory network (BiLSTM) is used when a CPU compute is used, thereby optimizing the choice of DNN for the user's setup.
|
||||
## Time Series Forecasting
|
||||
- **Forecast Energy Demand**
|
||||
- Dataset: [NYC energy demand data](http://mis.nyiso.com/public/P-58Blist.htm)
|
||||
- **[Jupyter Notebook](forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb)**
|
||||
- run experiment remotely on AML Compute cluster
|
||||
- use lags and rolling window features (see the forecasting configuration sketch after this list)
|
||||
- view the featurization steps that were applied during training
|
||||
- get the best model, use it to forecast on test data and compare the accuracy of predictions against real data
|
||||
- **Forecast Orange Juice Sales (Multi-Series)**
|
||||
- Dataset: [Dominick's grocery sales of orange juice](forecasting-orange-juice-sales/dominicks_OJ.csv)
|
||||
- **[Jupyter Notebook](forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb)**
|
||||
- run experiment remotely on AML Compute cluster
|
||||
- customize time-series featurization, change column purpose and override transformer hyperparameters
|
||||
- evaluate locally the performance of the generated best model
|
||||
- deploy the best model as a webservice on Azure Container Instance (ACI)
|
||||
- get online predictions from the deployed model
|
||||
- **Forecast Demand of a Bike-Sharing Service**
|
||||
- Dataset: [Bike demand data](forecasting-bike-share/bike-no.csv)
|
||||
- **[Jupyter Notebook](forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb)**
|
||||
- run experiment remotely on AML Compute cluster
|
||||
- integrate holiday features
|
||||
- run a rolling forecast for a test set that is longer than the forecast horizon
|
||||
- compute metrics on the predictions from the remote forecast
|
||||
- **The Forecast Function Interface**
|
||||
- Dataset: Generated for sample purposes
|
||||
- **[Jupyter Notebook](forecasting-forecast-function/auto-ml-forecasting-function.ipynb)**
|
||||
- train a forecaster using a remote AML Compute cluster
|
||||
- capabilities of the forecast function (e.g. forecast farther into the horizon)
|
||||
- generate confidence intervals
|
||||
- **Forecast Beverage Production**
|
||||
- Dataset: [Monthly beer production data](forecasting-beer-remote/Beer_no_valid_split_train.csv)
|
||||
- **[Jupyter Notebook](forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb)**
|
||||
- train using a remote AML Compute cluster
|
||||
- enable DNN models for training
|
||||
- forecast on a remote compute cluster and compare different model performance
|
||||
- **Continuous Retraining with NOAA Weather Data**
|
||||
- Dataset: [NOAA weather data from Azure Open Datasets](https://azure.microsoft.com/en-us/services/open-datasets/)
|
||||
- **[Jupyter Notebook](continuous-retraining/auto-ml-continuous-retraining.ipynb)**
|
||||
- continuously retrain a model using Pipelines and AutoML
|
||||
- create a Pipeline to upload a time series dataset to an Azure blob
|
||||
- create a Pipeline to run an AutoML experiment and register the best resulting model in the Workspace
|
||||
- publish the training pipeline created and schedule it to run daily
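The forecasting notebooks above all funnel their time-series settings through `ForecastingParameters` on the `AutoMLConfig`. A minimal sketch of the pattern, assuming a `train_data` TabularDataset with a `timeStamp` column, a `demand` label column, and an existing `compute_target` (all names are illustrative, not taken from any specific notebook):

```python
# Sketch: forecasting task with lags and a rolling window (SDK v1).
from azureml.automl.core.forecasting_parameters import ForecastingParameters
from azureml.train.automl import AutoMLConfig

forecasting_parameters = ForecastingParameters(
    time_column_name="timeStamp",      # assumed time column
    forecast_horizon=48,               # predict 48 periods ahead
    target_lags=12,                    # lag features on the target
    target_rolling_window_size=4,      # rolling-window aggregates
)

automl_config = AutoMLConfig(
    task="forecasting",
    primary_metric="normalized_root_mean_squared_error",
    training_data=train_data,          # assumed TabularDataset
    label_column_name="demand",        # assumed label column
    n_cross_validations=3,
    compute_target=compute_target,     # assumed AML Compute cluster
    forecasting_parameters=forecasting_parameters,
)
```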
|
||||
|
||||
<a name="documentation"></a>
|
||||
See [Configure automated machine learning experiments](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-auto-train) to learn more about the settings and features available for automated machine learning experiments.
|
||||
@@ -179,7 +207,7 @@ The main code of the file must be indented so that it is under this condition.
|
||||
## automl_setup fails
|
||||
1. On Windows, make sure that you are running automl_setup from an Anaconda Prompt window rather than a regular cmd window. You can launch the "Anaconda Prompt" window by hitting the Start button and typing "Anaconda Prompt". If you don't see the application "Anaconda Prompt", you might not have conda or Miniconda installed. In that case, you can install it [here](https://conda.io/miniconda.html)
|
||||
2. Check that you have conda 64-bit installed rather than 32-bit. You can check this with the command `conda info`. The `platform` should be `win-64` for Windows or `osx-64` for Mac.
|
||||
3. Check that you have conda 4.4.10 or later. You can check the version with the command `conda -V`. If you have a previous version installed, you can update it using the command: `conda update conda`.
|
||||
3. Check that you have conda 4.7.8 or later. You can check the version with the command `conda -V`. If you have a previous version installed, you can update it using the command: `conda update conda`.
|
||||
4. On Linux, if the error is `gcc: error trying to exec 'cc1plus': execvp: No such file or directory`, install build essentials using the command `sudo apt-get install build-essential`.
|
||||
5. Pass a new name as the first parameter to automl_setup so that it creates a new conda environment. You can view existing conda environments using `conda env list` and remove them with `conda env remove -n <environmentname>`.
|
||||
|
||||
@@ -230,6 +258,15 @@ You may check the version of tensorflow and uninstall as follows
|
||||
2) enter `pip freeze` and look for `tensorflow`; if found, the listed version should be < 1.13
|
||||
3) If the listed version is not a supported version, run `pip uninstall tensorflow` in the command shell and enter `y` for confirmation.
|
||||
|
||||
## KeyError: 'brand' when running AutoML on local compute or Azure Databricks cluster
|
||||
If a new environment was created after 10 June 2020 using SDK 1.7.0 or lower, training may fail with the above error due to an update in the py-cpuinfo package. (Environments created on or before 10 June 2020 are unaffected, as are experiments run on remote compute, since cached training images are used.) To work around this issue, take either of the following two steps (a quick version-check sketch follows them):
|
||||
|
||||
1) Update the SDK version to 1.8.0 or higher (this will also downgrade py-cpuinfo to 5.0.0):
|
||||
`pip install --upgrade azureml-sdk[automl]`
|
||||
|
||||
2) Downgrade the installed version of py-cpuinfo to 5.0.0:
|
||||
`pip install py-cpuinfo==5.0.0`
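To confirm which py-cpuinfo version is actually installed before choosing a step, a quick hedged check:

```python
# Sketch: print the installed py-cpuinfo version; 5.0.0 is the version
# the workaround above pins to.
import pkg_resources

print(pkg_resources.get_distribution("py-cpuinfo").version)
```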
|
||||
|
||||
## Remote run: DsvmCompute.create fails
|
||||
There are several reasons why DsvmCompute.create can fail. The reason is usually given in the error message, but you may have to look at the end of the message for the details. Some common reasons are:
|
||||
1) `Compute name is invalid, it should start with a letter, be between 2 and 16 character, and only include letters (a-zA-Z), numbers (0-9) and \'-\'.` Note that underscores are not allowed in the name (a quick validation sketch follows).
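The naming rule quoted above can be checked locally before calling DsvmCompute.create. A small sketch (the regex simply encodes the rule from the error message; it is not an official validator):

```python
# Sketch: validate a proposed compute name against the quoted rule --
# starts with a letter, 2 to 16 characters, only letters, digits and '-'.
import re

def is_valid_compute_name(name: str) -> bool:
    return re.fullmatch(r"[a-zA-Z][a-zA-Z0-9-]{1,15}", name) is not None

print(is_valid_compute_name("cpu-cluster"))  # True
print(is_valid_compute_name("my_compute"))   # False: underscore not allowed
```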
|
||||
|
||||
@@ -1,36 +1,26 @@
|
||||
name: azure_automl
|
||||
channels:
|
||||
- conda-forge
|
||||
- pytorch
|
||||
- main
|
||||
dependencies:
|
||||
# The python interpreter version.
|
||||
# Currently Azure ML only supports 3.5.2 and later.
|
||||
- pip<=19.3.1
|
||||
- python>=3.5.2,<3.6.8
|
||||
- wheel==0.30.0
|
||||
- nb_conda
|
||||
- matplotlib==2.1.0
|
||||
- numpy>=1.16.0,<=1.16.2
|
||||
- cython
|
||||
- urllib3<1.24
|
||||
- scipy>=1.0.0,<=1.1.0
|
||||
- scikit-learn>=0.19.0,<=0.20.3
|
||||
- pandas>=0.22.0,<=0.23.4
|
||||
- py-xgboost<=0.90
|
||||
- fbprophet==0.5
|
||||
- pytorch=1.1.0
|
||||
- cudatoolkit=9.0
|
||||
# Azure ML only supports 3.8 and later.
|
||||
- pip==22.3.1
|
||||
- python>=3.10,<3.11
|
||||
- holidays==0.29
|
||||
- scipy==1.10.1
|
||||
- tqdm==4.66.1
|
||||
|
||||
- pip:
|
||||
# Required packages for AzureML execution, history, and data preparation.
|
||||
- azureml-defaults
|
||||
- azureml-dataprep[pandas]
|
||||
- azureml-train-automl
|
||||
- azureml-train
|
||||
- azureml-widgets
|
||||
- azureml-pipeline
|
||||
- pytorch-transformers==1.0.0
|
||||
- spacy==2.1.8
|
||||
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
|
||||
|
||||
channels:
|
||||
- anaconda
|
||||
- conda-forge
|
||||
- pytorch
|
||||
- azureml-widgets~=1.59.0
|
||||
- azureml-defaults~=1.59.0
|
||||
- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.59.0/validated_win32_requirements.txt [--no-deps]
|
||||
- matplotlib==3.7.1
|
||||
- xgboost==1.5.2
|
||||
- prophet==1.1.4
|
||||
- onnx==1.16.1
|
||||
- setuptools-git==1.2
|
||||
- spacy==3.7.4
|
||||
- https://aka.ms/automl-resources/packages/en_core_web_sm-3.7.1.tar.gz
|
||||
|
||||
@@ -0,0 +1,30 @@
|
||||
name: azure_automl
|
||||
channels:
|
||||
- conda-forge
|
||||
- pytorch
|
||||
- main
|
||||
dependencies:
|
||||
# The python interpreter version.
|
||||
# Azure ML only supports 3.7 and later.
|
||||
- pip==22.3.1
|
||||
- python>=3.10,<3.11
|
||||
- matplotlib==3.7.1
|
||||
- numpy>=1.21.6,<=1.23.5
|
||||
- urllib3==1.26.7
|
||||
- scipy==1.10.1
|
||||
- scikit-learn==1.5.1
|
||||
- holidays==0.29
|
||||
- pytorch::pytorch=1.11.0
|
||||
- cudatoolkit=10.1.243
|
||||
- notebook
|
||||
|
||||
- pip:
|
||||
# Required packages for AzureML execution, history, and data preparation.
|
||||
- azureml-widgets~=1.59.0
|
||||
- azureml-defaults~=1.59.0
|
||||
- pytorch-transformers==1.0.0
|
||||
- spacy==3.7.4
|
||||
- xgboost==1.5.2
|
||||
- prophet==1.1.4
|
||||
- https://aka.ms/automl-resources/packages/en_core_web_sm-3.7.1.tar.gz
|
||||
- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.59.0/validated_linux_requirements.txt [--no-deps]
|
||||
@@ -1,37 +1,26 @@
|
||||
name: azure_automl
|
||||
channels:
|
||||
- conda-forge
|
||||
- pytorch
|
||||
- main
|
||||
dependencies:
|
||||
# The python interpreter version.
|
||||
# Currently Azure ML only supports 3.5.2 and later.
|
||||
- pip<=19.3.1
|
||||
- nomkl
|
||||
- python>=3.5.2,<3.6.8
|
||||
- wheel==0.30.0
|
||||
- nb_conda
|
||||
- matplotlib==2.1.0
|
||||
- numpy>=1.16.0,<=1.16.2
|
||||
- cython
|
||||
- urllib3<1.24
|
||||
- scipy>=1.0.0,<=1.1.0
|
||||
- scikit-learn>=0.19.0,<=0.20.3
|
||||
- pandas>=0.22.0,<0.23.0
|
||||
- py-xgboost<=0.80
|
||||
- fbprophet==0.5
|
||||
- pytorch=1.1.0
|
||||
- cudatoolkit=9.0
|
||||
# Currently Azure ML only supports 3.7 and later.
|
||||
- pip==22.3.1
|
||||
- python>=3.10,<3.11
|
||||
- numpy>=1.21.6,<=1.23.5
|
||||
- scipy==1.10.1
|
||||
- scikit-learn==1.5.1
|
||||
- holidays==0.29
|
||||
|
||||
- pip:
|
||||
# Required packages for AzureML execution, history, and data preparation.
|
||||
- azureml-defaults
|
||||
- azureml-dataprep[pandas]
|
||||
- azureml-train-automl
|
||||
- azureml-train
|
||||
- azureml-widgets
|
||||
- azureml-pipeline
|
||||
- azureml-widgets~=1.59.0
|
||||
- azureml-defaults~=1.59.0
|
||||
- pytorch-transformers==1.0.0
|
||||
- spacy==2.1.8
|
||||
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
|
||||
|
||||
channels:
|
||||
- anaconda
|
||||
- conda-forge
|
||||
- pytorch
|
||||
- prophet==1.1.4
|
||||
- xgboost==1.5.2
|
||||
- spacy==3.7.4
|
||||
- matplotlib==3.7.1
|
||||
- https://aka.ms/automl-resources/packages/en_core_web_sm-3.7.1.tar.gz
|
||||
- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.59.0/validated_darwin_requirements.txt [--no-deps]
|
||||
|
||||
@@ -6,11 +6,22 @@ set PIP_NO_WARN_SCRIPT_LOCATION=0
|
||||
|
||||
IF "%conda_env_name%"=="" SET conda_env_name="azure_automl"
|
||||
IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml"
|
||||
SET check_conda_version_script="check_conda_version.py"
|
||||
|
||||
IF NOT EXIST %automl_env_file% GOTO YmlMissing
|
||||
|
||||
IF "%CONDA_EXE%"=="" GOTO CondaMissing
|
||||
|
||||
IF NOT EXIST %check_conda_version_script% GOTO VersionCheckMissing
|
||||
|
||||
python "%check_conda_version_script%"
|
||||
IF errorlevel 1 GOTO ErrorExit:
|
||||
|
||||
SET replace_version_script="replace_latest_version.ps1"
|
||||
IF EXIST %replace_version_script% (
|
||||
powershell -file %replace_version_script% %automl_env_file%
|
||||
)
|
||||
|
||||
call conda activate %conda_env_name% 2>nul:
|
||||
|
||||
if not errorlevel 1 (
|
||||
@@ -22,6 +33,8 @@ if not errorlevel 1 (
|
||||
call conda env create -f %automl_env_file% -n %conda_env_name%
|
||||
)
|
||||
|
||||
python "%conda_prefix%\scripts\pywin32_postinstall.py" -install
|
||||
|
||||
call conda activate %conda_env_name% 2>nul:
|
||||
if errorlevel 1 goto ErrorExit
|
||||
|
||||
@@ -54,6 +67,10 @@ echo If you are running an older version of Miniconda or Anaconda,
|
||||
echo you can upgrade using the command: conda update conda
|
||||
goto End
|
||||
|
||||
:VersionCheckMissing
|
||||
echo File %check_conda_version_script% not found.
|
||||
goto End
|
||||
|
||||
:YmlMissing
|
||||
echo File %automl_env_file% not found.
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ CONDA_ENV_NAME=$1
|
||||
AUTOML_ENV_FILE=$2
|
||||
OPTIONS=$3
|
||||
PIP_NO_WARN_SCRIPT_LOCATION=0
|
||||
CHECK_CONDA_VERSION_SCRIPT="check_conda_version.py"
|
||||
|
||||
if [ "$CONDA_ENV_NAME" == "" ]
|
||||
then
|
||||
@@ -12,7 +13,7 @@ fi
|
||||
|
||||
if [ "$AUTOML_ENV_FILE" == "" ]
|
||||
then
|
||||
AUTOML_ENV_FILE="automl_env.yml"
|
||||
AUTOML_ENV_FILE="automl_env_linux.yml"
|
||||
fi
|
||||
|
||||
if [ ! -f $AUTOML_ENV_FILE ]; then
|
||||
@@ -20,6 +21,18 @@ if [ ! -f $AUTOML_ENV_FILE ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f $CHECK_CONDA_VERSION_SCRIPT ]; then
|
||||
echo "File $CHECK_CONDA_VERSION_SCRIPT not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
python "$CHECK_CONDA_VERSION_SCRIPT"
|
||||
if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sed -i 's/AZUREML-SDK-VERSION/latest/' $AUTOML_ENV_FILE
|
||||
|
||||
if source activate $CONDA_ENV_NAME 2> /dev/null
|
||||
then
|
||||
echo "Upgrading existing conda environment" $CONDA_ENV_NAME
|
||||
|
||||
@@ -4,6 +4,7 @@ CONDA_ENV_NAME=$1
|
||||
AUTOML_ENV_FILE=$2
|
||||
OPTIONS=$3
|
||||
PIP_NO_WARN_SCRIPT_LOCATION=0
|
||||
CHECK_CONDA_VERSION_SCRIPT="check_conda_version.py"
|
||||
|
||||
if [ "$CONDA_ENV_NAME" == "" ]
|
||||
then
|
||||
@@ -20,6 +21,19 @@ if [ ! -f $AUTOML_ENV_FILE ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f $CHECK_CONDA_VERSION_SCRIPT ]; then
|
||||
echo "File $CHECK_CONDA_VERSION_SCRIPT not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
python "$CHECK_CONDA_VERSION_SCRIPT"
|
||||
if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sed -i '' 's/AZUREML-SDK-VERSION/latest/' $AUTOML_ENV_FILE
|
||||
brew install libomp
|
||||
|
||||
if source activate $CONDA_ENV_NAME 2> /dev/null
|
||||
then
|
||||
echo "Upgrading existing conda environment" $CONDA_ENV_NAME
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
from setuptools._vendor.packaging import version
|
||||
import platform
|
||||
|
||||
try:
|
||||
import conda
|
||||
except Exception:
|
||||
print('Failed to import conda.')
|
||||
print('This setup is usually run from the base conda environment.')
|
||||
print('You can activate the base environment using the command "conda activate base"')
|
||||
exit(1)
|
||||
|
||||
architecture = platform.architecture()[0]
|
||||
|
||||
if architecture != "64bit":
|
||||
print('This setup requires 64bit Anaconda or Miniconda. Found: ' + architecture)
|
||||
exit(1)
|
||||
|
||||
minimumVersion = "4.7.8"
|
||||
|
||||
versionInvalid = (version.parse(conda.__version__) < version.parse(minimumVersion))
|
||||
|
||||
if versionInvalid:
|
||||
print('Setup requires conda version ' + minimumVersion + ' or higher.')
|
||||
print('You can use the command "conda update conda" to upgrade conda.')
|
||||
|
||||
exit(versionInvalid)
|
||||
@@ -13,7 +13,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -30,6 +30,7 @@
|
||||
"1. [Results](#Results)\n",
|
||||
"1. [Deploy](#Deploy)\n",
|
||||
"1. [Test](#Test)\n",
|
||||
"1. [Use auto-generated code for retraining](#Using-the-auto-generated-model-training-code-for-retraining-on-new-data)\n",
|
||||
"1. [Acknowledgements](#Acknowledgements)"
|
||||
]
|
||||
},
|
||||
@@ -41,7 +42,7 @@
|
||||
"\n",
|
||||
"In this example we use the UCI Bank Marketing dataset to showcase how you can use AutoML for a classification problem and deploy it to an Azure Container Instance (ACI). The classification goal is to predict if the client will subscribe to a term deposit with the bank.\n",
|
||||
"\n",
|
||||
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
|
||||
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
|
||||
"\n",
|
||||
"Please find the ONNX related documentations [here](https://github.com/onnx/onnx).\n",
|
||||
"\n",
|
||||
@@ -55,9 +56,10 @@
|
||||
"7. Create a container image.\n",
|
||||
"8. Create an Azure Container Instance (ACI) service.\n",
|
||||
"9. Test the ACI service.\n",
|
||||
"10. Leverage the auto generated training code and use it for retraining on an updated dataset\n",
|
||||
"\n",
|
||||
"In addition this notebook showcases the following features\n",
|
||||
"- **Blacklisting** certain pipelines\n",
|
||||
"- **Blocking** certain pipelines\n",
|
||||
"- Specifying **target metrics** to indicate stopping criteria\n",
|
||||
"- Handling **missing data** in the input"
|
||||
]
|
||||
@@ -74,9 +76,12 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"name": "automl-import"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"from matplotlib import pyplot as plt\n",
|
||||
@@ -86,10 +91,10 @@
|
||||
"import azureml.core\n",
|
||||
"from azureml.core.experiment import Experiment\n",
|
||||
"from azureml.core.workspace import Workspace\n",
|
||||
"from azureml.automl.core.featurization import FeaturizationConfig\n",
|
||||
"from azureml.core.dataset import Dataset\n",
|
||||
"from azureml.train.automl import AutoMLConfig\n",
|
||||
"from azureml.explain.model._internal.explanation_client import ExplanationClient"
|
||||
"from azureml.interpret import ExplanationClient\n",
|
||||
"from azureml.data.datapath import DataPath"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -99,16 +104,6 @@
|
||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.4.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -138,24 +133,27 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"name": "ws-setup"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# choose a name for experiment\n",
|
||||
"experiment_name = 'automl-classification-bmarketing-all'\n",
|
||||
"experiment_name = \"automl-classification-bmarketing-all\"\n",
|
||||
"\n",
|
||||
"experiment=Experiment(ws, experiment_name)\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Experiment Name'] = experiment.name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"Experiment Name\"] = experiment.name\n",
|
||||
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
@@ -165,15 +163,20 @@
|
||||
"source": [
|
||||
"## Create or Attach existing AmlCompute\n",
|
||||
"You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this article on the default limits and how to request more quota."
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
@@ -185,12 +188,12 @@
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
" print(\"Found existing cluster, use it.\")\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
|
||||
" max_nodes=6)\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
|
||||
" )\n",
|
||||
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
@@ -223,7 +226,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = pd.read_csv(\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\")\n",
|
||||
"data = pd.read_csv(\n",
|
||||
" \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\"\n",
|
||||
")\n",
|
||||
"data.head()"
|
||||
]
|
||||
},
|
||||
@@ -238,7 +243,12 @@
|
||||
"\n",
|
||||
"missing_rate = 0.75\n",
|
||||
"n_missing_samples = int(np.floor(data.shape[0] * missing_rate))\n",
|
||||
"missing_samples = np.hstack((np.zeros(data.shape[0] - n_missing_samples, dtype=np.bool), np.ones(n_missing_samples, dtype=np.bool)))\n",
|
||||
"missing_samples = np.hstack(\n",
|
||||
" (\n",
|
||||
" np.zeros(data.shape[0] - n_missing_samples, dtype=bool),\n",
|
||||
" np.ones(n_missing_samples, dtype=bool),\n",
|
||||
" )\n",
|
||||
")\n",
|
||||
"rng = np.random.RandomState(0)\n",
|
||||
"rng.shuffle(missing_samples)\n",
|
||||
"missing_features = rng.randint(0, data.shape[1], n_missing_samples)\n",
|
||||
@@ -251,19 +261,23 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if not os.path.isdir('data'):\n",
|
||||
" os.mkdir('data')\n",
|
||||
" \n",
|
||||
"if not os.path.isdir(\"data\"):\n",
|
||||
" os.mkdir(\"data\")\n",
|
||||
"# Save the train data to a csv to be uploaded to the datastore\n",
|
||||
"pd.DataFrame(data).to_csv(\"data/train_data.csv\", index=False)\n",
|
||||
"\n",
|
||||
"ds = ws.get_default_datastore()\n",
|
||||
"ds.upload(src_dir='./data', target_path='bankmarketing', overwrite=True, show_progress=True)\n",
|
||||
"\n",
|
||||
" \n",
|
||||
"target = DataPath(\n",
|
||||
" datastore=ds, path_on_datastore=\"bankmarketing/train_data.csv\", name=\"bankmarketing\"\n",
|
||||
")\n",
|
||||
"Dataset.File.upload_directory(\n",
|
||||
" src_dir=\"./data\", target=target, overwrite=True, show_progress=True\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Upload the training data as a tabular dataset for access during training on remote compute\n",
|
||||
"train_data = Dataset.Tabular.from_delimited_files(path=ds.path('bankmarketing/train_data.csv'))\n",
|
||||
"train_data = Dataset.Tabular.from_delimited_files(\n",
|
||||
" path=ds.path(\"bankmarketing/train_data.csv\")\n",
|
||||
")\n",
|
||||
"label = \"y\""
|
||||
]
|
||||
},
|
||||
@@ -314,8 +328,8 @@
|
||||
"|**task**|classification or regression or forecasting|\n",
|
||||
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
|
||||
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
|
||||
"|**blacklist_models** | *List* of *strings* indicating machine learning algorithms for AutoML to avoid in this run. <br><br> Allowed values for **Classification**<br><i>LogisticRegression</i><br><i>SGD</i><br><i>MultinomialNaiveBayes</i><br><i>BernoulliNaiveBayes</i><br><i>SVM</i><br><i>LinearSVM</i><br><i>KNN</i><br><i>DecisionTree</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>GradientBoosting</i><br><i>TensorFlowDNN</i><br><i>TensorFlowLinearClassifier</i><br><br>Allowed values for **Regression**<br><i>ElasticNet</i><br><i>GradientBoosting</i><br><i>DecisionTree</i><br><i>KNN</i><br><i>LassoLars</i><br><i>SGD</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>TensorFlowLinearRegressor</i><br><i>TensorFlowDNN</i><br><br>Allowed values for **Forecasting**<br><i>ElasticNet</i><br><i>GradientBoosting</i><br><i>DecisionTree</i><br><i>KNN</i><br><i>LassoLars</i><br><i>SGD</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>TensorFlowLinearRegressor</i><br><i>TensorFlowDNN</i><br><i>Arima</i><br><i>Prophet</i>|\n",
|
||||
"| **whitelist_models** | *List* of *strings* indicating machine learning algorithms for AutoML to use in this run. Same values listed above for **blacklist_models** allowed for **whitelist_models**.|\n",
|
||||
"|**blocked_models** | *List* of *strings* indicating machine learning algorithms for AutoML to avoid in this run. <br><br> Allowed values for **Classification**<br><i>LogisticRegression</i><br><i>SGD</i><br><i>MultinomialNaiveBayes</i><br><i>BernoulliNaiveBayes</i><br><i>SVM</i><br><i>LinearSVM</i><br><i>KNN</i><br><i>DecisionTree</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>GradientBoosting</i><br><i>TensorFlowDNN</i><br><i>TensorFlowLinearClassifier</i><br><br>Allowed values for **Regression**<br><i>ElasticNet</i><br><i>GradientBoosting</i><br><i>DecisionTree</i><br><i>KNN</i><br><i>LassoLars</i><br><i>SGD</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>TensorFlowLinearRegressor</i><br><i>TensorFlowDNN</i><br><br>Allowed values for **Forecasting**<br><i>ElasticNet</i><br><i>GradientBoosting</i><br><i>DecisionTree</i><br><i>KNN</i><br><i>LassoLars</i><br><i>SGD</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>TensorFlowLinearRegressor</i><br><i>TensorFlowDNN</i><br><i>Arima</i><br><i>Prophet</i>|\n",
|
||||
"|**allowed_models** | *List* of *strings* indicating machine learning algorithms for AutoML to use in this run. Same values listed above for **blocked_models** allowed for **allowed_models**.|\n",
|
||||
"|**experiment_exit_score**| Value indicating the target for *primary_metric*. <br>Once the target is surpassed the run terminates.|\n",
|
||||
"|**experiment_timeout_hours**| Maximum amount of time in hours that all iterations combined can take before the experiment terminates.|\n",
|
||||
"|**enable_early_stopping**| Flag to enble early termination if the score is not improving in the short term.|\n",
|
||||
@@ -323,6 +337,7 @@
|
||||
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
||||
"|**training_data**|Input dataset, containing both features and label column.|\n",
|
||||
"|**label_column_name**|The name of the label column.|\n",
|
||||
"|**enable_code_generation**|Flag to enable generation of training code for each of the models that AutoML is creating.\n",
|
||||
"\n",
|
||||
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
|
||||
]
|
||||
@@ -334,58 +349,57 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_settings = {\n",
|
||||
" \"experiment_timeout_hours\" : 0.3,\n",
|
||||
" \"enable_early_stopping\" : True,\n",
|
||||
" \"experiment_timeout_hours\": 0.3,\n",
|
||||
" \"enable_early_stopping\": True,\n",
|
||||
" \"iteration_timeout_minutes\": 5,\n",
|
||||
" \"max_concurrent_iterations\": 4,\n",
|
||||
" \"max_cores_per_iteration\": -1,\n",
|
||||
" #\"n_cross_validations\": 2,\n",
|
||||
" \"primary_metric\": 'AUC_weighted',\n",
|
||||
" \"featurization\": 'auto',\n",
|
||||
" # \"n_cross_validations\": 2,\n",
|
||||
" \"primary_metric\": \"AUC_weighted\",\n",
|
||||
" \"featurization\": \"auto\",\n",
|
||||
" \"verbosity\": logging.INFO,\n",
|
||||
" \"enable_code_generation\": True,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||
" debug_log = 'automl_errors.log',\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" experiment_exit_score = 0.9984,\n",
|
||||
" blacklist_models = ['KNN','LinearSVM'],\n",
|
||||
" enable_onnx_compatible_models=True,\n",
|
||||
" training_data = train_data,\n",
|
||||
" label_column_name = label,\n",
|
||||
" validation_data = validation_dataset,\n",
|
||||
" **automl_settings\n",
|
||||
" )"
|
||||
"automl_config = AutoMLConfig(\n",
|
||||
" task=\"classification\",\n",
|
||||
" debug_log=\"automl_errors.log\",\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" experiment_exit_score=0.9984,\n",
|
||||
" blocked_models=[\"KNN\", \"LinearSVM\"],\n",
|
||||
" enable_onnx_compatible_models=True,\n",
|
||||
" training_data=train_data,\n",
|
||||
" label_column_name=label,\n",
|
||||
" validation_data=validation_dataset,\n",
|
||||
" **automl_settings,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while."
|
||||
"Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"name": "experiment-submit"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run = experiment.submit(automl_config, show_output = False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run"
|
||||
"remote_run = experiment.submit(automl_config, show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"Run the following cell to access previous runs. Uncomment the cell below and update the run_id."
|
||||
]
|
||||
@@ -396,9 +410,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#from azureml.train.automl.run import AutoMLRun\n",
|
||||
"#remote_run = AutoMLRun(experiment=experiment, run_id='<run_ID_goes_here')\n",
|
||||
"#remote_run"
|
||||
"# from azureml.train.automl.run import AutoMLRun\n",
|
||||
"# remote_run = AutoMLRun(experiment=experiment, run_id='<run_ID_goes_here')\n",
|
||||
"# remote_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -417,7 +431,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"best_run_customized, fitted_model_customized = remote_run.get_output()"
|
||||
"# Retrieve the best Run object\n",
|
||||
"best_run = remote_run.get_best_child()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -426,7 +441,7 @@
|
||||
"source": [
|
||||
"## Transparency\n",
|
||||
"\n",
|
||||
"View updated featurization summary"
|
||||
"View featurization summary for the best model - to study how different features were transformed. This is stored as a JSON file in the outputs directory for the run."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -435,36 +450,16 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"custom_featurizer = fitted_model_customized.named_steps['datatransformer']\n",
|
||||
"df = custom_featurizer.get_featurization_summary()\n",
|
||||
"pd.DataFrame(data=df)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Set `is_user_friendly=False` to get a more detailed summary for the transforms being applied."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"df = custom_featurizer.get_featurization_summary(is_user_friendly=False)\n",
|
||||
"pd.DataFrame(data=df)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"df = custom_featurizer.get_stats_feature_type_summary()\n",
|
||||
"pd.DataFrame(data=df)"
|
||||
"# Download the featurization summary JSON file locally\n",
|
||||
"best_run.download_file(\n",
|
||||
" \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Render the JSON as a pandas DataFrame\n",
|
||||
"with open(\"featurization_summary.json\", \"r\") as f:\n",
|
||||
" records = json.load(f)\n",
|
||||
"\n",
|
||||
"pd.DataFrame.from_records(records)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -477,11 +472,14 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"name": "run-details"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.widgets import RunDetails\n",
|
||||
"RunDetails(remote_run).show() "
|
||||
"\n",
|
||||
"RunDetails(remote_run).show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -500,14 +498,16 @@
|
||||
"source": [
|
||||
"# Wait for the best model explanation run to complete\n",
|
||||
"from azureml.core.run import Run\n",
|
||||
"model_explainability_run_id = remote_run.get_properties().get('ModelExplainRunId')\n",
|
||||
"\n",
|
||||
"model_explainability_run_id = remote_run.id + \"_\" + \"ModelExplain\"\n",
|
||||
"print(model_explainability_run_id)\n",
|
||||
"if model_explainability_run_id is not None:\n",
|
||||
" model_explainability_run = Run(experiment=experiment, run_id=model_explainability_run_id)\n",
|
||||
" model_explainability_run.wait_for_completion()\n",
|
||||
"model_explainability_run = Run(\n",
|
||||
" experiment=experiment, run_id=model_explainability_run_id\n",
|
||||
")\n",
|
||||
"model_explainability_run.wait_for_completion()\n",
|
||||
"\n",
|
||||
"# Get the best run object\n",
|
||||
"best_run, fitted_model = remote_run.get_output()"
|
||||
"best_run = remote_run.get_best_child()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -584,6 +584,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.automl.runtime.onnx_convert import OnnxConverter\n",
|
||||
"\n",
|
||||
"onnx_fl_path = \"./best_model.onnx\"\n",
|
||||
"OnnxConverter.save_onnx_model(onnx_mdl, onnx_fl_path)"
|
||||
]
|
||||
@@ -606,33 +607,31 @@
|
||||
"from azureml.automl.core.onnx_convert import OnnxConvertConstants\n",
|
||||
"from azureml.train.automl import constants\n",
|
||||
"\n",
|
||||
"if sys.version_info < OnnxConvertConstants.OnnxIncompatiblePythonVersion:\n",
|
||||
" python_version_compatible = True\n",
|
||||
"else:\n",
|
||||
" python_version_compatible = False\n",
|
||||
"\n",
|
||||
"import onnxruntime\n",
|
||||
"from azureml.automl.runtime.onnx_convert import OnnxInferenceHelper\n",
|
||||
"\n",
|
||||
"def get_onnx_res(run):\n",
|
||||
" res_path = 'onnx_resource.json'\n",
|
||||
" run.download_file(name=constants.MODEL_RESOURCE_PATH_ONNX, output_file_path=res_path)\n",
|
||||
" with open(res_path) as f:\n",
|
||||
" onnx_res = json.load(f)\n",
|
||||
" return onnx_res\n",
|
||||
"\n",
|
||||
"if python_version_compatible:\n",
|
||||
"def get_onnx_res(run):\n",
|
||||
" res_path = \"onnx_resource.json\"\n",
|
||||
" run.download_file(\n",
|
||||
" name=constants.MODEL_RESOURCE_PATH_ONNX, output_file_path=res_path\n",
|
||||
" )\n",
|
||||
" with open(res_path) as f:\n",
|
||||
" result = json.load(f)\n",
|
||||
" return result\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"if sys.version_info < OnnxConvertConstants.OnnxIncompatiblePythonVersion:\n",
|
||||
" test_df = test_dataset.to_pandas_dataframe()\n",
|
||||
" mdl_bytes = onnx_mdl.SerializeToString()\n",
|
||||
" onnx_res = get_onnx_res(best_run)\n",
|
||||
" onnx_result = get_onnx_res(best_run)\n",
|
||||
"\n",
|
||||
" onnxrt_helper = OnnxInferenceHelper(mdl_bytes, onnx_res)\n",
|
||||
" onnxrt_helper = OnnxInferenceHelper(mdl_bytes, onnx_result)\n",
|
||||
" pred_onnx, pred_prob_onnx = onnxrt_helper.predict(test_df)\n",
|
||||
"\n",
|
||||
" print(pred_onnx)\n",
|
||||
" print(pred_prob_onnx)\n",
|
||||
"else:\n",
|
||||
" print('Please use Python version 3.6 or 3.7 to run the inference helper.')"
|
||||
" print(\"Please use Python version 3.6 or 3.7 to run the inference helper.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -643,7 +642,16 @@
|
||||
"\n",
|
||||
"### Retrieve the Best Model\n",
|
||||
"\n",
|
||||
"Below we select the best pipeline from our iterations. The `get_output` method on `automl_classifier` returns the best run and the fitted model for the last invocation. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
|
||||
"Below we select the best pipeline from our iterations. The `get_best_child` method returns the Run object for the best model based on the default primary metric. There are additional flags that can be passed to the method if we want to retrieve the best Run based on any of the other supported metrics, or if we are just interested in the best run among the ONNX compatible runs. As always, you can execute `??remote_run.get_best_child` in a new cell to view the source or docs for the function."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"??remote_run.get_best_child"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -663,7 +671,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"best_run, fitted_model = remote_run.get_output()"
|
||||
"best_run = remote_run.get_best_child()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -672,13 +680,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_name = best_run.properties['model_name']\n",
|
||||
"model_name = best_run.properties[\"model_name\"]\n",
|
||||
"\n",
|
||||
"script_file_name = 'inference/score.py'\n",
|
||||
"conda_env_file_name = 'inference/env.yml'\n",
|
||||
"script_file_name = \"inference/score.py\"\n",
|
||||
"\n",
|
||||
"best_run.download_file('outputs/scoring_file_v_1_0_0.py', 'inference/score.py')\n",
|
||||
"best_run.download_file('outputs/conda_env_v_1_0_0.yml', 'inference/env.yml')"
|
||||
"best_run.download_file(\"outputs/scoring_file_v_1_0_0.py\", \"inference/score.py\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -695,11 +701,15 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"description = 'AutoML Model trained on bank marketing data to predict if a client will subscribe to a term deposit'\n",
|
||||
"description = \"AutoML Model trained on bank marketing data to predict if a client will subscribe to a term deposit\"\n",
|
||||
"tags = None\n",
|
||||
"model = remote_run.register_model(model_name = model_name, description = description, tags = tags)\n",
|
||||
"model = remote_run.register_model(\n",
|
||||
" model_name=model_name, description=description, tags=tags\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(remote_run.model_id) # This will be written to the script file later in the notebook."
|
||||
"print(\n",
|
||||
" remote_run.model_id\n",
|
||||
") # This will be written to the script file later in the notebook."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -721,39 +731,24 @@
|
||||
"from azureml.core.model import Model\n",
|
||||
"from azureml.core.environment import Environment\n",
|
||||
"\n",
|
||||
"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=conda_env_file_name)\n",
|
||||
"inference_config = InferenceConfig(entry_script=script_file_name, environment=myenv)\n",
|
||||
"inference_config = InferenceConfig(\n",
|
||||
" environment=best_run.get_environment(), entry_script=script_file_name\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1, \n",
|
||||
" memory_gb = 1, \n",
|
||||
" tags = {'area': \"bmData\", 'type': \"automl_classification\"}, \n",
|
||||
" description = 'sample service for Automl Classification')\n",
|
||||
"aciconfig = AciWebservice.deploy_configuration(\n",
|
||||
" cpu_cores=2,\n",
|
||||
" memory_gb=2,\n",
|
||||
" tags={\"area\": \"bmData\", \"type\": \"automl_classification\"},\n",
|
||||
" description=\"sample service for Automl Classification\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"aci_service_name = 'automl-sample-bankmarketing-all'\n",
|
||||
"aci_service_name = model_name.lower()\n",
|
||||
"print(aci_service_name)\n",
|
||||
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
|
||||
"aci_service.wait_for_deployment(True)\n",
|
||||
"print(aci_service.state)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Delete a Web Service\n",
|
||||
"\n",
|
||||
"Deletes the specified web service."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#aci_service.delete()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -769,7 +764,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#aci_service.get_logs()"
|
||||
"# aci_service.get_logs()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -778,7 +773,9 @@
|
||||
"source": [
|
||||
"## Test\n",
|
||||
"\n",
|
||||
"Now that the model is trained, run the test data through the trained model to get the predicted values."
|
||||
"Now that the model is trained, run the test data through the trained model to get the predicted values. This calls the ACI web service to do the prediction.\n",
|
||||
"\n",
|
||||
"Note that the JSON passed to the ACI web service is an array of rows of data. Each row should either be an array of values in the same order that was used for training or a dictionary where the keys are the same as the column names used for training. The example below uses dictionary rows."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -797,8 +794,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"X_test = test_dataset.drop_columns(columns=['y'])\n",
|
||||
"y_test = test_dataset.keep_columns(columns=['y'], validate=True)\n",
|
||||
"X_test = test_dataset.drop_columns(columns=[\"y\"])\n",
|
||||
"y_test = test_dataset.keep_columns(columns=[\"y\"], validate=True)\n",
|
||||
"test_dataset.take(5).to_pandas_dataframe()"
|
||||
]
|
||||
},
|
||||
@@ -818,10 +815,26 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_pred = fitted_model.predict(X_test)\n",
|
||||
"import requests\n",
|
||||
"\n",
|
||||
"X_test_json = X_test.to_json(orient=\"records\")\n",
|
||||
"data = '{\"data\": ' + X_test_json + \"}\"\n",
|
||||
"headers = {\"Content-Type\": \"application/json\"}\n",
|
||||
"\n",
|
||||
"resp = requests.post(aci_service.scoring_uri, data, headers=headers)\n",
|
||||
"\n",
|
||||
"y_pred = json.loads(json.loads(resp.text))[\"result\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"actual = array(y_test)\n",
|
||||
"actual = actual[:,0]\n",
|
||||
"print(y_pred.shape, \" \", actual.shape)"
|
||||
"actual = actual[:, 0]\n",
|
||||
"print(len(y_pred), \" \", len(actual))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -830,8 +843,7 @@
|
||||
"source": [
|
||||
"### Calculate metrics for the prediction\n",
|
||||
"\n",
|
||||
"Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values \n",
|
||||
"from the trained model that was returned."
|
||||
"Now visualize the data as a confusion matrix that compared the predicted values against the actual values.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -841,12 +853,186 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%matplotlib notebook\n",
|
||||
"test_pred = plt.scatter(actual, y_pred, color='b')\n",
|
||||
"test_test = plt.scatter(actual, actual, color='g')\n",
|
||||
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
|
||||
"from sklearn.metrics import confusion_matrix\n",
|
||||
"import itertools\n",
|
||||
"\n",
|
||||
"cf = confusion_matrix(actual, y_pred)\n",
|
||||
"plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
|
||||
"plt.colorbar()\n",
|
||||
"plt.title(\"Confusion Matrix\")\n",
|
||||
"plt.xlabel(\"Predicted\")\n",
|
||||
"plt.ylabel(\"Actual\")\n",
|
||||
"class_labels = [\"no\", \"yes\"]\n",
|
||||
"tick_marks = np.arange(len(class_labels))\n",
|
||||
"plt.xticks(tick_marks, class_labels)\n",
|
||||
"plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"no\", \"yes\", \"\"])\n",
|
||||
"# plotting text value inside cells\n",
|
||||
"thresh = cf.max() / 2.0\n",
|
||||
"for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
|
||||
" plt.text(\n",
|
||||
" j,\n",
|
||||
" i,\n",
|
||||
" format(cf[i, j], \"d\"),\n",
|
||||
" horizontalalignment=\"center\",\n",
|
||||
" color=\"white\" if cf[i, j] > thresh else \"black\",\n",
|
||||
" )\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Delete a Web Service\n",
|
||||
"\n",
|
||||
"Deletes the specified web service."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"aci_service.delete()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using the auto generated model training code for retraining on new data\n",
|
||||
"\n",
|
||||
"Because we enabled code generation when the original experiment was created, we now have access to the code that was used to generate any of the AutoML tried models. Below we'll be using the generated training script of the best model to retrain on a new dataset.\n",
|
||||
"\n",
|
||||
"For this demo, we'll begin by creating new retraining dataset by combining the Train & Validation datasets that were used in the original experiment."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"original_train_data = pd.read_csv(\n",
|
||||
" \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"valid_data = pd.read_csv(\n",
|
||||
" \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_validate.csv\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# we'll emulate an updated dataset for retraining by combining the Train & Validation datasets into a new one\n",
|
||||
"retrain_pd = pd.concat([original_train_data, valid_data])\n",
|
||||
"retrain_pd.to_csv(\"data/retrain_data.csv\", index=False)\n",
|
||||
"ds.upload_files(\n",
|
||||
" files=[\"data/retrain_data.csv\"],\n",
|
||||
" target_path=\"bankmarketing/\",\n",
|
||||
" overwrite=True,\n",
|
||||
" show_progress=True,\n",
|
||||
")\n",
|
||||
"retrain_dataset = Dataset.Tabular.from_delimited_files(\n",
|
||||
" path=ds.path(\"bankmarketing/retrain_data.csv\")\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# after creating and uploading the retraining dataset, let's register it with the workspace for reuse\n",
|
||||
"retrain_dataset = retrain_dataset.register(\n",
|
||||
" workspace=ws,\n",
|
||||
" name=\"Bankmarketing_retrain\",\n",
|
||||
" description=\"Updated training dataset, includes validation data\",\n",
|
||||
" create_new_version=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we'll download the generated script for the best run and use it for retraining. For more advanced scenarios, you can customize the training script as you need: change the featurization pipeline, change the learner algorithm or its hyperparameters, etc. \n",
|
||||
"\n",
|
||||
"For this exercise, we'll leave the script as it was generated."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# download the autogenerated training script into the generated_code folder\n",
|
||||
"best_run.download_file(\n",
|
||||
" \"outputs/generated_code/script.py\", \"generated_code/training_script.py\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# view the contents of the autogenerated training script\n",
|
||||
"! cat generated_code/training_script.py"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import uuid\n",
|
||||
"from azureml.core import ScriptRunConfig\n",
|
||||
"from azureml._restclient.models import RunTypeV2\n",
|
||||
"from azureml._restclient.models.create_run_dto import CreateRunDto\n",
|
||||
"from azureml._restclient.run_client import RunClient\n",
|
||||
"\n",
|
||||
"codegen_runid = str(uuid.uuid4())\n",
|
||||
"client = RunClient(\n",
|
||||
" experiment.workspace.service_context,\n",
|
||||
" experiment.name,\n",
|
||||
" codegen_runid,\n",
|
||||
" experiment_id=experiment.id,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# override the training_dataset_id to point to our new retraining dataset we just registered above\n",
|
||||
"dataset_arguments = [\"--training_dataset_id\", retrain_dataset.id]\n",
|
||||
"\n",
|
||||
"# create the retraining run as a child of the AutoML generated training run\n",
|
||||
"create_run_dto = CreateRunDto(\n",
|
||||
" run_id=codegen_runid,\n",
|
||||
" parent_run_id=best_run.id,\n",
|
||||
" description=\"AutoML Codegen Script Run using an updated training dataset\",\n",
|
||||
" target=cpu_cluster_name,\n",
|
||||
" run_type_v2=RunTypeV2(orchestrator=\"Execution\", traits=[\"automl-codegen\"]),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# the script for retraining run is pointing to the AutoML generated script\n",
|
||||
"src = ScriptRunConfig(\n",
|
||||
" source_directory=\"generated_code\",\n",
|
||||
" script=\"training_script.py\",\n",
|
||||
" arguments=dataset_arguments,\n",
|
||||
" compute_target=cpu_cluster_name,\n",
|
||||
" environment=best_run.get_environment(),\n",
|
||||
")\n",
|
||||
"run_dto = client.create_run(run_id=codegen_runid, create_run_dto=create_run_dto)\n",
|
||||
"\n",
|
||||
"# submit the experiment\n",
|
||||
"retraining_run = experiment.submit(config=src, run_id=codegen_runid)\n",
|
||||
"retraining_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"After the run completes, we can get download/test/deploy to the model it has built."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retraining_run.wait_for_completion()\n",
|
||||
"\n",
|
||||
"retraining_run.download_file(\"outputs/model.pkl\", \"generated_code/model.pkl\")"
|
||||
]
|
||||
},
|
||||
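Before deploying, it can be worth sanity-checking the retrained model locally. A minimal sketch, assuming the local environment matches the training environment's package versions (unpickling can fail otherwise) and that the bank-marketing label column is named "y":

import joblib

# deserialize the retrained model (a scikit-learn compatible pipeline)
retrained_model = joblib.load("generated_code/model.pkl")

# smoke test: score a few rows of the retraining data without the label column
sample = retrain_pd.drop(columns=["y"]).head(5)  # assumes the label column is "y"
print(retrained_model.predict(sample))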
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -870,7 +1056,7 @@
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "anumamah"
|
||||
"name": "ratanase"
|
||||
}
|
||||
],
|
||||
"category": "tutorial",
|
||||
@@ -889,10 +1075,13 @@
|
||||
],
|
||||
"friendly_name": "Automated ML run with basic edition features.",
|
||||
"index_order": 5,
|
||||
"kernel_info": {
|
||||
"name": "python3-azureml"
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"display_name": "Python 3.8 - AzureML",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
"name": "python38-azureml"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -904,7 +1093,10 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.7"
|
||||
"version": "3.10.14"
|
||||
},
|
||||
"nteract": {
|
||||
"version": "nteract-front-end@1.0.0"
|
||||
},
|
||||
"tags": [
|
||||
"featurization",
|
||||
@@ -915,5 +1107,5 @@
|
||||
"task": "Classification"
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
name: auto-ml-classification-bank-marketing-all-features
|
||||
dependencies:
|
||||
- pip:
|
||||
- azureml-sdk
|
||||
- azureml-train-automl
|
||||
- azureml-widgets
|
||||
- matplotlib
|
||||
- onnxruntime==1.0.0
|
||||
@@ -42,7 +42,7 @@
|
||||
"\n",
|
||||
"This notebook is using remote compute to train the model.\n",
|
||||
"\n",
|
||||
"If you are using an Azure Machine Learning [Notebook VM](https://docs.microsoft.com/en-us/azure/machine-learning/service/tutorial-1st-experiment-sdk-setup), you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
|
||||
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
|
||||
"\n",
|
||||
"In this notebook you will learn how to:\n",
|
||||
"1. Create an experiment using an existing workspace.\n",
|
||||
@@ -87,16 +87,6 @@
|
||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.4.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -106,18 +96,19 @@
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# choose a name for experiment\n",
|
||||
"experiment_name = 'automl-classification-ccard-remote'\n",
|
||||
"experiment_name = \"automl-classification-ccard-remote\"\n",
|
||||
"\n",
|
||||
"experiment=Experiment(ws, experiment_name)\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Experiment Name'] = experiment.name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"Experiment Name\"] = experiment.name\n",
|
||||
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
@@ -127,6 +118,9 @@
|
||||
"source": [
|
||||
"## Create or Attach existing AmlCompute\n",
|
||||
"A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
@@ -147,12 +141,12 @@
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
" print(\"Found existing cluster, use it.\")\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
||||
" max_nodes=6)\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
|
||||
" )\n",
|
||||
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
@@ -175,13 +169,15 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"name": "load-data"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
|
||||
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
|
||||
"training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
|
||||
"label_column_name = 'Class'"
|
||||
"label_column_name = \"Class\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -207,32 +203,35 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"name": "automl-config"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_settings = {\n",
|
||||
" \"n_cross_validations\": 3,\n",
|
||||
" \"primary_metric\": 'average_precision_score_weighted',\n",
|
||||
" \"primary_metric\": \"average_precision_score_weighted\",\n",
|
||||
" \"enable_early_stopping\": True,\n",
|
||||
" \"max_concurrent_iterations\": 2, # This is a limit for testing purpose, please increase it as per cluster size\n",
|
||||
" \"experiment_timeout_hours\": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible\n",
|
||||
" \"max_concurrent_iterations\": 2, # This is a limit for testing purpose, please increase it as per cluster size\n",
|
||||
" \"experiment_timeout_hours\": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible\n",
|
||||
" \"verbosity\": logging.INFO,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||
" debug_log = 'automl_errors.log',\n",
|
||||
" compute_target = compute_target,\n",
|
||||
" training_data = training_data,\n",
|
||||
" label_column_name = label_column_name,\n",
|
||||
" **automl_settings\n",
|
||||
" )"
|
||||
"automl_config = AutoMLConfig(\n",
|
||||
" task=\"classification\",\n",
|
||||
" debug_log=\"automl_errors.log\",\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" training_data=training_data,\n",
|
||||
" label_column_name=label_column_name,\n",
|
||||
" **automl_settings,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while."
|
||||
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -241,7 +240,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run = experiment.submit(automl_config, show_output = False)"
|
||||
"remote_run = experiment.submit(automl_config, show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -251,17 +250,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# If you need to retrieve a run that already started, use the following code\n",
|
||||
"#from azureml.train.automl.run import AutoMLRun\n",
|
||||
"#remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run"
|
||||
"# from azureml.train.automl.run import AutoMLRun\n",
|
||||
"# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -293,6 +283,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.widgets import RunDetails\n",
|
||||
"\n",
|
||||
"RunDetails(remote_run).show()"
|
||||
]
|
||||
},
|
||||
@@ -322,7 +313,7 @@
|
||||
"\n",
|
||||
"### Retrieve the Best Model\n",
|
||||
"\n",
|
||||
"Below we select the best pipeline from our iterations. The `get_output` method on `automl_classifier` returns the best run and the fitted model for the last invocation. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
|
||||
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
|
||||
]
|
||||
},
|
||||
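For reference, the `get_output` overloads look like this; a minimal sketch, assuming `remote_run` has completed (the metric name is an example, any logged AutoML metric works):

# best run and fitted model by the primary metric
best_run, fitted_model = remote_run.get_output()

# best run and fitted model for any other logged metric
auc_run, auc_model = remote_run.get_output(metric="AUC_weighted")

# run and fitted model from a particular iteration
third_run, third_model = remote_run.get_output(iteration=3)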
{
|
||||
@@ -359,8 +350,12 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# convert the test data to dataframe\n",
|
||||
"X_test_df = validation_data.drop_columns(columns=[label_column_name]).to_pandas_dataframe()\n",
|
||||
"y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True).to_pandas_dataframe()"
|
||||
"X_test_df = validation_data.drop_columns(\n",
|
||||
" columns=[label_column_name]\n",
|
||||
").to_pandas_dataframe()\n",
|
||||
"y_test_df = validation_data.keep_columns(\n",
|
||||
" columns=[label_column_name], validate=True\n",
|
||||
").to_pandas_dataframe()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -394,20 +389,26 @@
|
||||
"import numpy as np\n",
|
||||
"import itertools\n",
|
||||
"\n",
|
||||
"cf =confusion_matrix(y_test_df.values,y_pred)\n",
|
||||
"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n",
|
||||
"cf = confusion_matrix(y_test_df.values, y_pred)\n",
|
||||
"plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
|
||||
"plt.colorbar()\n",
|
||||
"plt.title('Confusion Matrix')\n",
|
||||
"plt.xlabel('Predicted')\n",
|
||||
"plt.ylabel('Actual')\n",
|
||||
"class_labels = ['False','True']\n",
|
||||
"plt.title(\"Confusion Matrix\")\n",
|
||||
"plt.xlabel(\"Predicted\")\n",
|
||||
"plt.ylabel(\"Actual\")\n",
|
||||
"class_labels = [\"False\", \"True\"]\n",
|
||||
"tick_marks = np.arange(len(class_labels))\n",
|
||||
"plt.xticks(tick_marks,class_labels)\n",
|
||||
"plt.yticks([-0.5,0,1,1.5],['','False','True',''])\n",
|
||||
"plt.xticks(tick_marks, class_labels)\n",
|
||||
"plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"False\", \"True\", \"\"])\n",
|
||||
"# plotting text value inside cells\n",
|
||||
"thresh = cf.max() / 2.\n",
|
||||
"for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])):\n",
|
||||
" plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n",
|
||||
"thresh = cf.max() / 2.0\n",
|
||||
"for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
|
||||
" plt.text(\n",
|
||||
" j,\n",
|
||||
" i,\n",
|
||||
" format(cf[i, j], \"d\"),\n",
|
||||
" horizontalalignment=\"center\",\n",
|
||||
" color=\"white\" if cf[i, j] > thresh else \"black\",\n",
|
||||
" )\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
@@ -424,22 +425,33 @@
|
||||
"source": [
|
||||
"This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud\n",
|
||||
"\n",
|
||||
"The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Universit\u00c3\u00a9 Libre de Bruxelles) on big data mining and fraud detection.\n",
|
||||
"More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\n",
|
||||
"\n",
|
||||
"The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Universit\u00c3\u0192\u00c2\u00a9 Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\n",
|
||||
"Please cite the following works: \n",
|
||||
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tAndrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n",
|
||||
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tDal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon\n",
|
||||
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tDal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\n",
|
||||
"o\tDal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\n",
|
||||
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tCarcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-A\u00c3\u0192\u00c2\u00abl; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\n",
|
||||
"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tCarcillo, Fabrizio; Le Borgne, Yann-A\u00c3\u0192\u00c2\u00abl; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing"
|
||||
"Please cite the following works:\n",
|
||||
"\n",
|
||||
"Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n",
|
||||
"\n",
|
||||
"Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon\n",
|
||||
"\n",
|
||||
"Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\n",
|
||||
"\n",
|
||||
"Dal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\n",
|
||||
"\n",
|
||||
"Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-A\u00c3\u00abl; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\n",
|
||||
"\n",
|
||||
"Carcillo, Fabrizio; Le Borgne, Yann-A\u00c3\u00abl; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing\n",
|
||||
"\n",
|
||||
"Bertrand Lebichot, Yann-A\u00c3\u00abl Le Borgne, Liyun He, Frederic Obl\u00c3\u00a9, Gianluca Bontempi Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection, INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\n",
|
||||
"\n",
|
||||
"Fabrizio Carcillo, Yann-A\u00c3\u00abl Le Borgne, Olivier Caelen, Frederic Obl\u00c3\u00a9, Gianluca Bontempi Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection Information Sciences, 2019"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "tzvikei"
|
||||
"name": "ratanase"
|
||||
}
|
||||
],
|
||||
"category": "tutorial",
|
||||
@@ -460,9 +472,9 @@
|
||||
"friendly_name": "Classification of credit card fraudulent transactions using Automated ML",
|
||||
"index_order": 5,
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"display_name": "Python 3.8 - AzureML",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
"name": "python38-azureml"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
name: auto-ml-classification-credit-card-fraud
|
||||
dependencies:
|
||||
- pip:
|
||||
- azureml-sdk
|
||||
- azureml-train-automl
|
||||
- azureml-widgets
|
||||
- matplotlib
|
||||
@@ -1,587 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Automated Machine Learning\n",
|
||||
"_**Text Classification Using Deep Learning**_\n",
|
||||
"\n",
|
||||
"## Contents\n",
|
||||
"1. [Introduction](#Introduction)\n",
|
||||
"1. [Setup](#Setup)\n",
|
||||
"1. [Data](#Data)\n",
|
||||
"1. [Train](#Train)\n",
|
||||
"1. [Evaluate](#Evaluate)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Introduction\n",
|
||||
"This notebook demonstrates classification with text data using deep learning in AutoML.\n",
|
||||
"\n",
|
||||
"AutoML highlights here include using deep neural networks (DNNs) to create embedded features from text data. Depending on the compute cluster the user provides, AutoML tried out Bidirectional Encoder Representations from Transformers (BERT) when a GPU compute is used, and Bidirectional Long-Short Term neural network (BiLSTM) when a CPU compute is used, thereby optimizing the choice of DNN for the uesr's setup.\n",
|
||||
"\n",
|
||||
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
|
||||
"\n",
|
||||
"An Enterprise workspace is required for this notebook. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade).\n",
|
||||
"\n",
|
||||
"Notebook synopsis:\n",
|
||||
"1. Creating an Experiment in an existing Workspace\n",
|
||||
"2. Configuration and remote run of AutoML for a text dataset (20 Newsgroups dataset from scikit-learn) for classification\n",
|
||||
"3. Registering the best model for future use\n",
|
||||
"4. Evaluating the final model on a test set"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"import os\n",
|
||||
"import shutil\n",
|
||||
"\n",
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core.experiment import Experiment\n",
|
||||
"from azureml.core.workspace import Workspace\n",
|
||||
"from azureml.core.dataset import Dataset\n",
|
||||
"from azureml.core.compute import AmlCompute\n",
|
||||
"from azureml.core.compute import ComputeTarget\n",
|
||||
"from azureml.core.run import Run\n",
|
||||
"from azureml.widgets import RunDetails\n",
|
||||
"from azureml.core.model import Model \n",
|
||||
"from helper import run_inference, get_result_df\n",
|
||||
"from azureml.train.automl import AutoMLConfig\n",
|
||||
"from sklearn.datasets import fetch_20newsgroups"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.4.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As part of the setup you have already created a <b>Workspace</b>. To run AutoML, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# Choose an experiment name.\n",
|
||||
"experiment_name = 'automl-classification-text-dnn'\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace Name'] = ws.name\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Experiment Name'] = experiment.name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up a compute cluster\n",
|
||||
"This section uses a user-provided compute cluster (named \"dnntext-cluster\" in this example). If a cluster with this name does not exist in the user's workspace, the below code will create a new cluster. You can choose the parameters of the cluster as mentioned in the comments.\n",
|
||||
"\n",
|
||||
"Whether you provide/select a CPU or GPU cluster, AutoML will choose the appropriate DNN for that setup - BiLSTM or BERT text featurizer will be included in the candidate featurizers on CPU and GPU respectively. If your goal is to obtain the most accurate model, we recommend you use GPU clusters since BERT featurizers usually outperform BiLSTM featurizers."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||
"\n",
|
||||
"# Choose a name for your cluster.\n",
|
||||
"amlcompute_cluster_name = \"dnntext-cluster\"\n",
|
||||
"\n",
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_NC6\", # CPU for BiLSTM, such as \"STANDARD_D2_V2\" \n",
|
||||
" # To use BERT (this is recommended for best performance), select a GPU such as \"STANDARD_NC6\" \n",
|
||||
" # or similar GPU option\n",
|
||||
" # available in your workspace\n",
|
||||
" max_nodes = 1)\n",
|
||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Get data\n",
|
||||
"For this notebook we will use 20 Newsgroups data from scikit-learn. We filter the data to contain four classes and take a sample as training data. Please note that for accuracy improvement, more data is needed. For this notebook we provide a small-data example so that you can use this template to use with your larger sized data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data_dir = \"text-dnn-data\" # Local directory to store data\n",
|
||||
"blobstore_datadir = data_dir # Blob store directory to store data in\n",
|
||||
"target_column_name = 'y'\n",
|
||||
"feature_column_name = 'X'\n",
|
||||
"\n",
|
||||
"def get_20newsgroups_data():\n",
|
||||
" '''Fetches 20 Newsgroups data from scikit-learn\n",
|
||||
" Returns them in form of pandas dataframes\n",
|
||||
" '''\n",
|
||||
" remove = ('headers', 'footers', 'quotes')\n",
|
||||
" categories = [\n",
|
||||
" 'rec.sport.baseball',\n",
|
||||
" 'rec.sport.hockey',\n",
|
||||
" 'comp.graphics',\n",
|
||||
" 'sci.space',\n",
|
||||
" ]\n",
|
||||
"\n",
|
||||
" data = fetch_20newsgroups(subset = 'train', categories = categories,\n",
|
||||
" shuffle = True, random_state = 42,\n",
|
||||
" remove = remove)\n",
|
||||
" data = pd.DataFrame({feature_column_name: data.data, target_column_name: data.target})\n",
|
||||
"\n",
|
||||
" data_train = data[:200]\n",
|
||||
" data_test = data[200:300] \n",
|
||||
"\n",
|
||||
" data_train = remove_blanks_20news(data_train, feature_column_name, target_column_name)\n",
|
||||
" data_test = remove_blanks_20news(data_test, feature_column_name, target_column_name)\n",
|
||||
" \n",
|
||||
" return data_train, data_test\n",
|
||||
" \n",
|
||||
"def remove_blanks_20news(data, feature_column_name, target_column_name):\n",
|
||||
" \n",
|
||||
" data[feature_column_name] = data[feature_column_name].replace(r'\\n', ' ', regex=True).apply(lambda x: x.strip())\n",
|
||||
" data = data[data[feature_column_name] != '']\n",
|
||||
" \n",
|
||||
" return data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Fetch data and upload to datastore for use in training"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data_train, data_test = get_20newsgroups_data()\n",
|
||||
"\n",
|
||||
"if not os.path.isdir(data_dir):\n",
|
||||
" os.mkdir(data_dir)\n",
|
||||
" \n",
|
||||
"train_data_fname = data_dir + '/train_data.csv'\n",
|
||||
"test_data_fname = data_dir + '/test_data.csv'\n",
|
||||
"\n",
|
||||
"data_train.to_csv(train_data_fname, index=False)\n",
|
||||
"data_test.to_csv(test_data_fname, index=False)\n",
|
||||
"\n",
|
||||
"datastore = ws.get_default_datastore()\n",
|
||||
"datastore.upload(src_dir=data_dir, target_path=blobstore_datadir,\n",
|
||||
" overwrite=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, blobstore_datadir + '/train_data.csv')])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prepare AutoML run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This step requires an Enterprise workspace to gain access to this feature. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_settings = {\n",
|
||||
" \"experiment_timeout_minutes\": 20,\n",
|
||||
" \"primary_metric\": 'accuracy',\n",
|
||||
" \"max_concurrent_iterations\": 4, \n",
|
||||
" \"max_cores_per_iteration\": -1,\n",
|
||||
" \"enable_dnn\": True,\n",
|
||||
" \"enable_early_stopping\": True,\n",
|
||||
" \"validation_size\": 0.3,\n",
|
||||
" \"verbosity\": logging.INFO,\n",
|
||||
" \"enable_voting_ensemble\": False,\n",
|
||||
" \"enable_stack_ensemble\": False,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||
" debug_log = 'automl_errors.log',\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" training_data=train_dataset,\n",
|
||||
" label_column_name=target_column_name,\n",
|
||||
" **automl_settings\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Submit AutoML Run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_run = experiment.submit(automl_config, show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Displaying the run objects gives you links to the visual tools in the Azure Portal. Go try them!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retrieve the Best Model\n",
|
||||
"Below we select the best model pipeline from our iterations, use it to test on test data on the same compute cluster."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can test the model locally to get a feel of the input/output. When the model contains BERT, this step will require pytorch and pytorch-transformers installed in your local environment. The exact versions of these packages can be found in the **automl_env.yml** file located in the local copy of your MachineLearningNotebooks folder here:\n",
|
||||
"MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl_env.yml"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"best_run, fitted_model = automl_run.get_output()"
|
||||
]
|
||||
},
|
||||
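As a quick local smoke test of the fitted pipeline, you can score a few held-out documents; a minimal sketch, assuming the pytorch dependencies noted above are installed when BERT was selected:

# score a handful of test documents locally with the fitted AutoML pipeline
sample_docs = data_test[[feature_column_name]].head(5)
print(fitted_model.predict(sample_docs))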
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can now see what text transformations are used to convert text data to features for this dataset, including deep learning transformations based on BiLSTM or Transformer (BERT is one implementation of a Transformer) models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text_transformations_used = []\n",
|
||||
"for column_group in fitted_model.named_steps['datatransformer'].get_featurization_summary():\n",
|
||||
" text_transformations_used.extend(column_group['Transformations'])\n",
|
||||
"text_transformations_used"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Registering the best model\n",
|
||||
"We now register the best fitted model from the AutoML Run for use in future deployments. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Get results stats, extract the best model from AutoML run, download and register the resultant best model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"summary_df = get_result_df(automl_run)\n",
|
||||
"best_dnn_run_id = summary_df['run_id'].iloc[0]\n",
|
||||
"best_dnn_run = Run(experiment, best_dnn_run_id)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_dir = 'Model' # Local folder where the model will be stored temporarily\n",
|
||||
"if not os.path.isdir(model_dir):\n",
|
||||
" os.mkdir(model_dir)\n",
|
||||
" \n",
|
||||
"best_dnn_run.download_file('outputs/model.pkl', model_dir + '/model.pkl')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Register the model in your Azure Machine Learning Workspace. If you previously registered a model, please make sure to delete it so as to replace it with this new model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Register the model\n",
|
||||
"model_name = 'textDNN-20News'\n",
|
||||
"model = Model.register(model_path = model_dir + '/model.pkl',\n",
|
||||
" model_name = model_name,\n",
|
||||
" tags=None,\n",
|
||||
" workspace=ws)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Evaluate on Test Data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We now use the best fitted model from the AutoML Run to make predictions on the test set. \n",
|
||||
"\n",
|
||||
"Test set schema should match that of the training set."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, blobstore_datadir + '/test_data.csv')])\n",
|
||||
"\n",
|
||||
"# preview the first 3 rows of the dataset\n",
|
||||
"test_dataset.take(3).to_pandas_dataframe()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"test_experiment = Experiment(ws, experiment_name + \"_test\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"script_folder = os.path.join(os.getcwd(), 'inference')\n",
|
||||
"os.makedirs(script_folder, exist_ok=True)\n",
|
||||
"shutil.copy('infer.py', script_folder)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run, test_dataset,\n",
|
||||
" target_column_name, model_name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Display computed metrics"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"test_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"RunDetails(test_run).show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"test_run.wait_for_completion()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pd.Series(test_run.get_metrics())"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "anshirga"
|
||||
}
|
||||
],
|
||||
"compute": [
|
||||
"AML Compute"
|
||||
],
|
||||
"datasets": [
|
||||
"None"
|
||||
],
|
||||
"deployment": [
|
||||
"None"
|
||||
],
|
||||
"exclude_from_index": false,
|
||||
"framework": [
|
||||
"None"
|
||||
],
|
||||
"friendly_name": "DNN Text Featurization",
|
||||
"index_order": 2,
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.7"
|
||||
},
|
||||
"tags": [
|
||||
"None"
|
||||
],
|
||||
"task": "Text featurization using DNNs for classification"
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
name: auto-ml-classification-text-dnn
|
||||
dependencies:
|
||||
- pip:
|
||||
- azureml-sdk
|
||||
- azureml-train-automl
|
||||
- azureml-widgets
|
||||
- matplotlib
|
||||
- https://download.pytorch.org/whl/cpu/torch-1.1.0-cp35-cp35m-win_amd64.whl
|
||||
- sentencepiece==0.1.82
|
||||
- pytorch-transformers==1.0
|
||||
- spacy==2.1.8
|
||||
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
|
||||
@@ -1,60 +0,0 @@
|
||||
import pandas as pd
|
||||
from azureml.core import Environment
|
||||
from azureml.core.conda_dependencies import CondaDependencies
|
||||
from azureml.train.estimator import Estimator
|
||||
from azureml.core.run import Run
|
||||
|
||||
|
||||
def run_inference(test_experiment, compute_target, script_folder, train_run,
|
||||
test_dataset, target_column_name, model_name):
|
||||
|
||||
train_run.download_file('outputs/conda_env_v_1_0_0.yml',
|
||||
'inference/condafile.yml')
|
||||
|
||||
inference_env = Environment("myenv")
|
||||
inference_env.docker.enabled = True
|
||||
inference_env.python.conda_dependencies = CondaDependencies(
|
||||
conda_dependencies_file_path='inference/condafile.yml')
|
||||
|
||||
est = Estimator(source_directory=script_folder,
|
||||
entry_script='infer.py',
|
||||
script_params={
|
||||
'--target_column_name': target_column_name,
|
||||
'--model_name': model_name
|
||||
},
|
||||
inputs=[test_dataset.as_named_input('test_data')],
|
||||
compute_target=compute_target,
|
||||
environment_definition=inference_env)
|
||||
|
||||
run = test_experiment.submit(
|
||||
est, tags={
|
||||
'training_run_id': train_run.id,
|
||||
'run_algorithm': train_run.properties['run_algorithm'],
|
||||
'valid_score': train_run.properties['score'],
|
||||
'primary_metric': train_run.properties['primary_metric']
|
||||
})
|
||||
|
||||
run.log("run_algorithm", run.tags['run_algorithm'])
|
||||
return run
|
||||
|
||||
|
||||
def get_result_df(remote_run):
|
||||
|
||||
children = list(remote_run.get_children(recursive=True))
|
||||
summary_df = pd.DataFrame(index=['run_id', 'run_algorithm',
|
||||
'primary_metric', 'Score'])
|
||||
goal_minimize = False
|
||||
for run in children:
|
||||
if('run_algorithm' in run.properties and 'score' in run.properties):
|
||||
summary_df[run.id] = [run.id, run.properties['run_algorithm'],
|
||||
run.properties['primary_metric'],
|
||||
float(run.properties['score'])]
|
||||
if('goal' in run.properties):
|
||||
goal_minimize = run.properties['goal'].split('_')[-1] == 'min'
|
||||
|
||||
summary_df = summary_df.T.sort_values(
|
||||
'Score',
|
||||
ascending=goal_minimize).drop_duplicates(['run_algorithm'])
|
||||
summary_df = summary_df.set_index('run_algorithm')
|
||||
|
||||
return summary_df
|
||||
@@ -1,54 +0,0 @@
|
||||
import numpy as np
|
||||
import argparse
|
||||
from azureml.core import Run
|
||||
from sklearn.externals import joblib
|
||||
from azureml.automl.core._vendor.automl.client.core.common import metrics
|
||||
from automl.client.core.common import constants
|
||||
from azureml.core.model import Model
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
'--target_column_name', type=str, dest='target_column_name',
|
||||
help='Target Column Name')
|
||||
parser.add_argument(
|
||||
'--model_name', type=str, dest='model_name',
|
||||
help='Name of registered model')
|
||||
|
||||
args = parser.parse_args()
|
||||
target_column_name = args.target_column_name
|
||||
model_name = args.model_name
|
||||
|
||||
print('args passed are: ')
|
||||
print('Target column name: ', target_column_name)
|
||||
print('Name of registered model: ', model_name)
|
||||
|
||||
model_path = Model.get_model_path(model_name)
|
||||
# deserialize the model file back into a sklearn model
|
||||
model = joblib.load(model_path)
|
||||
|
||||
run = Run.get_context()
|
||||
# get input dataset by name
|
||||
test_dataset = run.input_datasets['test_data']
|
||||
|
||||
X_test_df = test_dataset.drop_columns(columns=[target_column_name]) \
|
||||
.to_pandas_dataframe()
|
||||
y_test_df = test_dataset.with_timestamp_columns(None) \
|
||||
.keep_columns(columns=[target_column_name]) \
|
||||
.to_pandas_dataframe()
|
||||
|
||||
predicted = model.predict_proba(X_test_df)
|
||||
|
||||
# use automl metrics module
|
||||
scores = metrics.compute_metrics_classification(
|
||||
np.array(predicted),
|
||||
np.array(y_test_df),
|
||||
class_labels=model.classes_,
|
||||
metrics=list(constants.Metric.SCALAR_CLASSIFICATION_SET)
|
||||
)
|
||||
|
||||
print("scores:")
|
||||
print(scores)
|
||||
|
||||
for key, value in scores.items():
|
||||
run.log(key, value)
|
||||
@@ -4,7 +4,8 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
@@ -12,7 +13,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -32,13 +33,6 @@
|
||||
"8. [Test Retraining](#Test-Retraining)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"An Enterprise workspace is required for this notebook. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page.](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -82,16 +76,6 @@
|
||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.4.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -125,17 +109,18 @@
|
||||
"dstor = ws.get_default_datastore()\n",
|
||||
"\n",
|
||||
"# Choose a name for the run history container in the workspace.\n",
|
||||
"experiment_name = 'retrain-noaaweather'\n",
|
||||
"experiment_name = \"retrain-noaaweather\"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Run History Name'] = experiment_name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"Run History Name\"] = experiment_name\n",
|
||||
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
@@ -148,9 +133,12 @@
|
||||
"#### Create or Attach existing AmlCompute\n",
|
||||
"\n",
|
||||
"You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this article on the default limits and how to request more quota."
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -168,12 +156,12 @@
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
" print(\"Found existing cluster, use it.\")\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
|
||||
" max_nodes=4)\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
|
||||
" )\n",
|
||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
@@ -190,7 +178,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.runconfig import CondaDependencies, DEFAULT_CPU_IMAGE, RunConfiguration\n",
|
||||
"from azureml.core.runconfig import CondaDependencies, RunConfiguration\n",
|
||||
"\n",
|
||||
"# create a new RunConfig object\n",
|
||||
"conda_run_config = RunConfiguration(framework=\"python\")\n",
|
||||
@@ -199,15 +187,20 @@
|
||||
"conda_run_config.target = compute_target\n",
|
||||
"\n",
|
||||
"conda_run_config.environment.docker.enabled = True\n",
|
||||
"conda_run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE\n",
|
||||
"\n",
|
||||
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'applicationinsights', 'azureml-opendatasets'], \n",
|
||||
" conda_packages=['numpy==1.16.2'], \n",
|
||||
" pin_sdk_version=False)\n",
|
||||
"#cd.add_pip_package('azureml-explain-model')\n",
|
||||
"cd = CondaDependencies.create(\n",
|
||||
" pip_packages=[\n",
|
||||
" \"azureml-sdk[automl]\",\n",
|
||||
" \"applicationinsights\",\n",
|
||||
" \"azureml-opendatasets\",\n",
|
||||
" \"azureml-defaults\",\n",
|
||||
" ],\n",
|
||||
" conda_packages=[\"numpy==1.19.5\"],\n",
|
||||
" pin_sdk_version=False,\n",
|
||||
")\n",
|
||||
"conda_run_config.environment.python.conda_dependencies = cd\n",
|
||||
"\n",
|
||||
"print('run config is ready')"
|
||||
"print(\"run config is ready\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -224,7 +217,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# The name and target column of the Dataset to create \n",
|
||||
"# The name and target column of the Dataset to create\n",
|
||||
"dataset = \"NOAA-Weather-DS4\"\n",
|
||||
"target_column_name = \"temperature\""
|
||||
]
|
||||
@@ -248,12 +241,14 @@
|
||||
"from azureml.pipeline.steps import PythonScriptStep\n",
|
||||
"\n",
|
||||
"ds_name = PipelineParameter(name=\"ds_name\", default_value=dataset)\n",
|
||||
"upload_data_step = PythonScriptStep(script_name=\"upload_weather_data.py\", \n",
|
||||
" allow_reuse=False,\n",
|
||||
" name=\"upload_weather_data\",\n",
|
||||
" arguments=[\"--ds_name\", ds_name],\n",
|
||||
" compute_target=compute_target, \n",
|
||||
" runconfig=conda_run_config)"
|
||||
"upload_data_step = PythonScriptStep(\n",
|
||||
" script_name=\"upload_weather_data.py\",\n",
|
||||
" allow_reuse=False,\n",
|
||||
" name=\"upload_weather_data\",\n",
|
||||
" arguments=[\"--ds_name\", ds_name],\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" runconfig=conda_run_config,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -270,10 +265,11 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data_pipeline = Pipeline(\n",
|
||||
" description=\"pipeline_with_uploaddata\",\n",
|
||||
" workspace=ws, \n",
|
||||
" steps=[upload_data_step])\n",
|
||||
"data_pipeline_run = experiment.submit(data_pipeline, pipeline_parameters={\"ds_name\":dataset})"
|
||||
" description=\"pipeline_with_uploaddata\", workspace=ws, steps=[upload_data_step]\n",
|
||||
")\n",
|
||||
"data_pipeline_run = experiment.submit(\n",
|
||||
" data_pipeline, pipeline_parameters={\"ds_name\": dataset}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -313,13 +309,14 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data_prep_step = PythonScriptStep(script_name=\"check_data.py\", \n",
|
||||
" allow_reuse=False,\n",
|
||||
" name=\"check_data\",\n",
|
||||
" arguments=[\"--ds_name\", ds_name,\n",
|
||||
" \"--model_name\", model_name],\n",
|
||||
" compute_target=compute_target, \n",
|
||||
" runconfig=conda_run_config)"
|
||||
"data_prep_step = PythonScriptStep(\n",
|
||||
" script_name=\"check_data.py\",\n",
|
||||
" allow_reuse=False,\n",
|
||||
" name=\"check_data\",\n",
|
||||
" arguments=[\"--ds_name\", ds_name, \"--model_name\", model_name],\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" runconfig=conda_run_config,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -329,6 +326,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Dataset\n",
|
||||
"\n",
|
||||
"train_ds = Dataset.get_by_name(ws, dataset)\n",
|
||||
"train_ds = train_ds.drop_columns([\"partition_date\"])"
|
||||
]
|
||||
@@ -354,21 +352,22 @@
|
||||
" \"iteration_timeout_minutes\": 10,\n",
|
||||
" \"experiment_timeout_hours\": 0.25,\n",
|
||||
" \"n_cross_validations\": 3,\n",
|
||||
" \"primary_metric\": 'r2_score',\n",
|
||||
" \"primary_metric\": \"r2_score\",\n",
|
||||
" \"max_concurrent_iterations\": 3,\n",
|
||||
" \"max_cores_per_iteration\": -1,\n",
|
||||
" \"verbosity\": logging.INFO,\n",
|
||||
" \"enable_early_stopping\": True\n",
|
||||
" \"enable_early_stopping\": True,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task = 'regression',\n",
|
||||
" debug_log = 'automl_errors.log',\n",
|
||||
" path = \".\",\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" training_data = train_ds,\n",
|
||||
" label_column_name = target_column_name,\n",
|
||||
" **automl_settings\n",
|
||||
" )"
|
||||
"automl_config = AutoMLConfig(\n",
|
||||
" task=\"regression\",\n",
|
||||
" debug_log=\"automl_errors.log\",\n",
|
||||
" path=\".\",\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" training_data=train_ds,\n",
|
||||
" label_column_name=target_column_name,\n",
|
||||
" **automl_settings,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -379,17 +378,21 @@
|
||||
"source": [
|
||||
"from azureml.pipeline.core import PipelineData, TrainingOutput\n",
|
||||
"\n",
|
||||
"metrics_output_name = 'metrics_output'\n",
|
||||
"best_model_output_name = 'best_model_output'\n",
|
||||
"metrics_output_name = \"metrics_output\"\n",
|
||||
"best_model_output_name = \"best_model_output\"\n",
|
||||
"\n",
|
||||
"metrics_data = PipelineData(name='metrics_data',\n",
|
||||
" datastore=dstor,\n",
|
||||
" pipeline_output_name=metrics_output_name,\n",
|
||||
" training_output=TrainingOutput(type='Metrics'))\n",
|
||||
"model_data = PipelineData(name='model_data',\n",
|
||||
" datastore=dstor,\n",
|
||||
" pipeline_output_name=best_model_output_name,\n",
|
||||
" training_output=TrainingOutput(type='Model'))"
|
||||
"metrics_data = PipelineData(\n",
|
||||
" name=\"metrics_data\",\n",
|
||||
" datastore=dstor,\n",
|
||||
" pipeline_output_name=metrics_output_name,\n",
|
||||
" training_output=TrainingOutput(type=\"Metrics\"),\n",
|
||||
")\n",
|
||||
"model_data = PipelineData(\n",
|
||||
" name=\"model_data\",\n",
|
||||
" datastore=dstor,\n",
|
||||
" pipeline_output_name=best_model_output_name,\n",
|
||||
" training_output=TrainingOutput(type=\"Model\"),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -399,10 +402,11 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_step = AutoMLStep(\n",
|
||||
" name='automl_module',\n",
|
||||
" name=\"automl_module\",\n",
|
||||
" automl_config=automl_config,\n",
|
||||
" outputs=[metrics_data, model_data],\n",
|
||||
" allow_reuse=False)"
|
||||
" allow_reuse=False,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -419,13 +423,22 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"register_model_step = PythonScriptStep(script_name=\"register_model.py\",\n",
|
||||
" name=\"register_model\",\n",
|
||||
" allow_reuse=False,\n",
|
||||
" arguments=[\"--model_name\", model_name, \"--model_path\", model_data, \"--ds_name\", ds_name],\n",
|
||||
" inputs=[model_data],\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" runconfig=conda_run_config)"
|
||||
"register_model_step = PythonScriptStep(\n",
|
||||
" script_name=\"register_model.py\",\n",
|
||||
" name=\"register_model\",\n",
|
||||
" allow_reuse=False,\n",
|
||||
" arguments=[\n",
|
||||
" \"--model_name\",\n",
|
||||
" model_name,\n",
|
||||
" \"--model_path\",\n",
|
||||
" model_data,\n",
|
||||
" \"--ds_name\",\n",
|
||||
" ds_name,\n",
|
||||
" ],\n",
|
||||
" inputs=[model_data],\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" runconfig=conda_run_config,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -443,8 +456,9 @@
|
||||
"source": [
|
||||
"training_pipeline = Pipeline(\n",
|
||||
" description=\"training_pipeline\",\n",
|
||||
" workspace=ws, \n",
|
||||
" steps=[data_prep_step, automl_step, register_model_step])"
|
||||
" workspace=ws,\n",
|
||||
" steps=[data_prep_step, automl_step, register_model_step],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -453,8 +467,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_pipeline_run = experiment.submit(training_pipeline, pipeline_parameters={\n",
|
||||
" \"ds_name\": dataset, \"model_name\": \"noaaweatherds\"})"
|
||||
"training_pipeline_run = experiment.submit(\n",
|
||||
" training_pipeline,\n",
|
||||
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -483,8 +499,8 @@
|
||||
"pipeline_name = \"Retraining-Pipeline-NOAAWeather\"\n",
|
||||
"\n",
|
||||
"published_pipeline = training_pipeline.publish(\n",
|
||||
" name=pipeline_name, \n",
|
||||
" description=\"Pipeline that retrains AutoML model\")\n",
|
||||
" name=pipeline_name, description=\"Pipeline that retrains AutoML model\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"published_pipeline"
|
||||
]
|
||||
@@ -496,13 +512,17 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Schedule\n",
|
||||
"schedule = Schedule.create(workspace=ws, name=\"RetrainingSchedule\",\n",
|
||||
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
|
||||
" pipeline_id=published_pipeline.id, \n",
|
||||
" experiment_name=experiment_name, \n",
|
||||
" datastore=dstor,\n",
|
||||
" wait_for_provisioning=True,\n",
|
||||
" polling_interval=1440)"
|
||||
"\n",
|
||||
"schedule = Schedule.create(\n",
|
||||
" workspace=ws,\n",
|
||||
" name=\"RetrainingSchedule\",\n",
|
||||
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
|
||||
" pipeline_id=published_pipeline.id,\n",
|
||||
" experiment_name=experiment_name,\n",
|
||||
" datastore=dstor,\n",
|
||||
" wait_for_provisioning=True,\n",
|
||||
" polling_interval=1440,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -526,8 +546,8 @@
|
||||
"pipeline_name = \"DataIngestion-Pipeline-NOAAWeather\"\n",
|
||||
"\n",
|
||||
"published_pipeline = training_pipeline.publish(\n",
|
||||
" name=pipeline_name, \n",
|
||||
" description=\"Pipeline that updates NOAAWeather Dataset\")\n",
|
||||
" name=pipeline_name, description=\"Pipeline that updates NOAAWeather Dataset\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"published_pipeline"
|
||||
]
|
||||
@@ -539,13 +559,17 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Schedule\n",
|
||||
"schedule = Schedule.create(workspace=ws, name=\"RetrainingSchedule-DataIngestion\",\n",
|
||||
" pipeline_parameters={\"ds_name\":dataset},\n",
|
||||
" pipeline_id=published_pipeline.id, \n",
|
||||
" experiment_name=experiment_name, \n",
|
||||
" datastore=dstor,\n",
|
||||
" wait_for_provisioning=True,\n",
|
||||
" polling_interval=1440)"
|
||||
"\n",
|
||||
"schedule = Schedule.create(\n",
|
||||
" workspace=ws,\n",
|
||||
" name=\"RetrainingSchedule-DataIngestion\",\n",
|
||||
" pipeline_parameters={\"ds_name\": dataset},\n",
|
||||
" pipeline_id=published_pipeline.id,\n",
|
||||
" experiment_name=experiment_name,\n",
|
||||
" datastore=dstor,\n",
|
||||
" wait_for_provisioning=True,\n",
|
||||
" polling_interval=1440,\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -556,9 +580,9 @@
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"display_name": "Python 3.8 - AzureML",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
"name": "python38-azureml"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
name: auto-ml-continuous-retraining
|
||||
dependencies:
|
||||
- pip:
|
||||
- azureml-sdk
|
||||
- azureml-train-automl
|
||||
- azureml-widgets
|
||||
- matplotlib
|
||||
- azureml-pipeline
|
||||
@@ -31,12 +31,15 @@ try:
|
||||
model = Model(ws, args.model_name)
|
||||
last_train_time = model.created_time
|
||||
print("Model was last trained on {0}.".format(last_train_time))
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
print("Could not get last model train time.")
|
||||
last_train_time = datetime.min.replace(tzinfo=pytz.UTC)
|
||||
|
||||
train_ds = Dataset.get_by_name(ws, args.ds_name)
|
||||
dataset_changed_time = train_ds.data_changed_time
|
||||
dataset_changed_time = train_ds.data_changed_time.replace(tzinfo=pytz.UTC)
|
||||
|
||||
print("dataset_changed_time=" + str(dataset_changed_time))
|
||||
print("last_train_time=" + str(last_train_time))
|
||||
|
||||
if not dataset_changed_time > last_train_time:
|
||||
print("Cancelling run since there is no new data.")
|
||||
|
||||
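The `.replace(tzinfo=pytz.UTC)` calls above matter because Python refuses to compare offset-naive and offset-aware datetimes. A minimal sketch of the failure mode and the fix, using only the standard library and `pytz`:

```
from datetime import datetime

import pytz

aware = datetime.min.replace(tzinfo=pytz.UTC)  # the fallback last_train_time above
naive = datetime(2021, 5, 1)                   # e.g. a naive data_changed_time

try:
    print(naive > aware)
except TypeError as err:
    print(err)  # can't compare offset-naive and offset-aware datetimes

# Normalizing both sides to UTC makes the comparison valid:
print(naive.replace(tzinfo=pytz.UTC) > aware)  # True
```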
@@ -25,9 +25,11 @@ datasets = [(Dataset.Scenario.TRAINING, train_ds)]
|
||||
|
||||
# Register model with training dataset
|
||||
|
||||
model = Model.register(workspace=ws,
|
||||
model_path=args.model_path,
|
||||
model_name=args.model_name,
|
||||
datasets=datasets)
|
||||
model = Model.register(
|
||||
workspace=ws,
|
||||
model_path=args.model_path,
|
||||
model_name=args.model_name,
|
||||
datasets=datasets,
|
||||
)
|
||||
|
||||
print("Registered version {0} of model {1}".format(model.version, model.name))
|
||||
|
||||
@@ -16,26 +16,82 @@ if type(run) == _OfflineRun:
|
||||
else:
|
||||
ws = run.experiment.workspace
|
||||
|
||||
usaf_list = ['725724', '722149', '723090', '722159', '723910', '720279',
|
||||
'725513', '725254', '726430', '720381', '723074', '726682',
|
||||
'725486', '727883', '723177', '722075', '723086', '724053',
|
||||
'725070', '722073', '726060', '725224', '725260', '724520',
|
||||
'720305', '724020', '726510', '725126', '722523', '703333',
|
||||
'722249', '722728', '725483', '722972', '724975', '742079',
|
||||
'727468', '722193', '725624', '722030', '726380', '720309',
|
||||
'722071', '720326', '725415', '724504', '725665', '725424',
|
||||
'725066']
|
||||
usaf_list = [
|
||||
"725724",
|
||||
"722149",
|
||||
"723090",
|
||||
"722159",
|
||||
"723910",
|
||||
"720279",
|
||||
"725513",
|
||||
"725254",
|
||||
"726430",
|
||||
"720381",
|
||||
"723074",
|
||||
"726682",
|
||||
"725486",
|
||||
"727883",
|
||||
"723177",
|
||||
"722075",
|
||||
"723086",
|
||||
"724053",
|
||||
"725070",
|
||||
"722073",
|
||||
"726060",
|
||||
"725224",
|
||||
"725260",
|
||||
"724520",
|
||||
"720305",
|
||||
"724020",
|
||||
"726510",
|
||||
"725126",
|
||||
"722523",
|
||||
"703333",
|
||||
"722249",
|
||||
"722728",
|
||||
"725483",
|
||||
"722972",
|
||||
"724975",
|
||||
"742079",
|
||||
"727468",
|
||||
"722193",
|
||||
"725624",
|
||||
"722030",
|
||||
"726380",
|
||||
"720309",
|
||||
"722071",
|
||||
"720326",
|
||||
"725415",
|
||||
"724504",
|
||||
"725665",
|
||||
"725424",
|
||||
"725066",
|
||||
]
|
||||
|
||||
|
||||
def get_noaa_data(start_time, end_time):
|
||||
columns = ['usaf', 'wban', 'datetime', 'latitude', 'longitude', 'elevation',
|
||||
'windAngle', 'windSpeed', 'temperature', 'stationName', 'p_k']
|
||||
columns = [
|
||||
"usaf",
|
||||
"wban",
|
||||
"datetime",
|
||||
"latitude",
|
||||
"longitude",
|
||||
"elevation",
|
||||
"windAngle",
|
||||
"windSpeed",
|
||||
"temperature",
|
||||
"stationName",
|
||||
"p_k",
|
||||
]
|
||||
isd = NoaaIsdWeather(start_time, end_time, cols=columns)
|
||||
noaa_df = isd.to_pandas_dataframe()
|
||||
df_filtered = noaa_df[noaa_df["usaf"].isin(usaf_list)]
|
||||
    df_filtered = df_filtered.reset_index(drop=True)
|
||||
print("Received {0} rows of training data between {1} and {2}".format(
|
||||
df_filtered.shape[0], start_time, end_time))
|
||||
print(
|
||||
"Received {0} rows of training data between {1} and {2}".format(
|
||||
df_filtered.shape[0], start_time, end_time
|
||||
)
|
||||
)
|
||||
return df_filtered
|
||||
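For reference, a hypothetical invocation of `get_noaa_data` as defined above; the two-week window mirrors the slice size used later in the script:

```
from datetime import datetime

from dateutil.relativedelta import relativedelta

end_time = datetime(2021, 5, 1, 0, 0)
start_time = end_time - relativedelta(weeks=2)

# Returns a pandas DataFrame filtered to the stations in usaf_list.
sample_df = get_noaa_data(start_time, end_time)
print(sample_df.shape)
```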
|
||||
|
||||
@@ -49,41 +105,57 @@ print("Argument 1(ds_name): %s" % args.ds_name)
|
||||
|
||||
dstor = ws.get_default_datastore()
|
||||
register_dataset = False
|
||||
end_time = datetime.utcnow()
|
||||
|
||||
try:
|
||||
ds = Dataset.get_by_name(ws, args.ds_name)
|
||||
end_time_last_slice = ds.data_changed_time.replace(tzinfo=None)
|
||||
print("Dataset {0} last updated on {1}".format(args.ds_name,
|
||||
end_time_last_slice))
|
||||
except Exception as e:
|
||||
print("Dataset {0} last updated on {1}".format(args.ds_name, end_time_last_slice))
|
||||
except Exception:
|
||||
print(traceback.format_exc())
|
||||
print("Dataset with name {0} not found, registering new dataset.".format(args.ds_name))
|
||||
print(
|
||||
"Dataset with name {0} not found, registering new dataset.".format(args.ds_name)
|
||||
)
|
||||
register_dataset = True
|
||||
end_time_last_slice = datetime.today() - relativedelta(weeks=2)
|
||||
end_time = datetime(2021, 5, 1, 0, 0)
|
||||
end_time_last_slice = end_time - relativedelta(weeks=2)
|
||||
|
||||
end_time = datetime.utcnow()
|
||||
train_df = get_noaa_data(end_time_last_slice, end_time)
|
||||
try:
|
||||
train_df = get_noaa_data(end_time_last_slice, end_time)
|
||||
except Exception as ex:
|
||||
print("get_noaa_data failed:", ex)
|
||||
train_df = None
|
||||
|
||||
if train_df.size > 0:
|
||||
print("Received {0} rows of new data after {0}.".format(
|
||||
train_df.shape[0], end_time_last_slice))
|
||||
folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(args.ds_name, end_time.year,
|
||||
end_time.month, end_time.day,
|
||||
end_time.hour, end_time.minute,
|
||||
end_time.second)
|
||||
if train_df is not None and train_df.size > 0:
|
||||
print(
|
||||
"Received {0} rows of new data after {1}.".format(
|
||||
train_df.shape[0], end_time_last_slice
|
||||
)
|
||||
)
|
||||
folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(
|
||||
args.ds_name,
|
||||
end_time.year,
|
||||
end_time.month,
|
||||
end_time.day,
|
||||
end_time.hour,
|
||||
end_time.minute,
|
||||
end_time.second,
|
||||
)
|
||||
file_path = "{0}/data.csv".format(folder_name)
|
||||
|
||||
# Add a new partition to the registered dataset
|
||||
os.makedirs(folder_name, exist_ok=True)
|
||||
train_df.to_csv(file_path, index=False)
|
||||
|
||||
dstor.upload_files(files=[file_path],
|
||||
target_path=folder_name,
|
||||
overwrite=True,
|
||||
show_progress=True)
|
||||
dstor.upload_files(
|
||||
files=[file_path], target_path=folder_name, overwrite=True, show_progress=True
|
||||
)
|
||||
else:
|
||||
print("No new data since {0}.".format(end_time_last_slice))
|
||||
|
||||
if register_dataset:
|
||||
ds = Dataset.Tabular.from_delimited_files(dstor.path("{}/**/*.csv".format(
|
||||
args.ds_name)), partition_format='/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv')
|
||||
ds = Dataset.Tabular.from_delimited_files(
|
||||
dstor.path("{}/**/*.csv".format(args.ds_name)),
|
||||
partition_format="/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv",
|
||||
)
|
||||
ds.register(ws, name=args.ds_name)
|
||||
|
||||
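For context, the `partition_format` above turns the timestamped folder structure into a virtual `partition_date` column on the registered dataset, which is why the training pipeline drops that column before fitting. A minimal sketch of reading it back (the dataset name here is an assumption):

```
from azureml.core import Dataset, Workspace

ws = Workspace.from_config()
ds = Dataset.get_by_name(ws, "noaaweatherds")  # hypothetical dataset name

df = ds.to_pandas_dataframe()
# partition_date is synthesized from the yyyy/MM/dd/HH/mm/ss folder names
print(df["partition_date"].max())
```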
@@ -0,0 +1,92 @@
|
||||
# Experimental Notebooks for Automated ML
|
||||
Notebooks listed in this folder use experimental features. Namespaces or function signatures may change in future SDK releases; the notebooks published here will reflect the latest supported APIs. All of these notebooks can run on a client-only installation of the Automated ML SDK.
|
||||
The client-only installation doesn't contain any of the machine learning libraries, such as scikit-learn, xgboost, or tensorflow, making it much faster to install and less likely to conflict with packages in an existing environment. However, since the ML libraries are not available locally, models cannot be downloaded and loaded directly in the client. To replace the functionality of having models locally, these notebooks also demonstrate the ModelProxy feature, which lets you submit a predict/forecast call to the training environment, as sketched below.
|
||||
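A rough sketch of that ModelProxy flow; `best_run` and `test_dataset` are assumptions here, and the exact API surface may differ between SDK versions:

```
from azureml.train.automl.model_proxy import ModelProxy

# best_run: a completed AutoML child run handle obtained elsewhere.
# With the thin client you keep the run handle instead of a local model.
model_proxy = ModelProxy(best_run)

# The prediction executes in the training environment, not locally,
# and comes back as a dataset that can be converted to pandas.
predictions = model_proxy.predict(test_dataset).to_pandas_dataframe()
```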
|
||||
<a name="localconda"></a>
|
||||
## Setup using a Local Conda environment
|
||||
|
||||
To run these notebooks on your own notebook server, use the installation instructions below.
|
||||
The instructions below will install everything you need and then start a Jupyter notebook.
|
||||
If you would like to use a lighter-weight version of the client that does not install all of the machine learning libraries locally, you can leverage the [experimental notebooks](experimental/README.md).
|
||||
|
||||
### 1. Install Miniconda from [here](https://conda.io/miniconda.html), choosing 64-bit Python 3.8 or higher.
|
||||
- **Note**: if you already have conda installed, you can keep using it, but it should be version 4.4.10 or later (as shown by `conda -V`). If you have an older version installed, you can update it using the command: `conda update conda`.
|
||||
There's no need to install Miniconda specifically.
|
||||
|
||||
### 2. Downloading the sample notebooks
|
||||
- Download the sample notebooks from [GitHub](https://github.com/Azure/MachineLearningNotebooks) as a zip and extract the contents to a local directory. The automated ML sample notebooks are in the "automated-machine-learning" folder.
|
||||
|
||||
### 3. Set up a new conda environment
|
||||
The **automl_setup_thin_client** script creates a new conda environment, installs the necessary packages, configures the widget, and starts a Jupyter notebook. It takes the conda environment name as an optional parameter; the default conda environment name is azure_automl_experimental. The exact command depends on the operating system; see the specific sections below for Windows, Mac, and Linux. It can take about 10 minutes to execute.
|
||||
|
||||
Packages installed by the **automl_setup_thin_client** script:
|
||||
<ul><li>python</li><li>nb_conda</li><li>matplotlib</li><li>numpy</li><li>cython</li><li>urllib3</li><li>pandas</li><li>azureml-sdk</li><li>azureml-widgets</li><li>pandas-ml</li></ul>
|
||||
|
||||
For more details, refer to [automl_env_thin_client.yml](./automl_env_thin_client.yml).
|
||||
## Windows
|
||||
Start an **Anaconda Prompt** window, cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:
|
||||
```
|
||||
automl_setup_thin_client
|
||||
```
|
||||
## Mac
|
||||
Install "Command line developer tools" if it is not already installed (you can use the command: `xcode-select --install`).
|
||||
|
||||
Start a Terminal window, cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:
|
||||
|
||||
```
|
||||
bash automl_setup_thin_client_mac.sh
|
||||
```
|
||||
|
||||
## Linux
|
||||
cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:
|
||||
|
||||
```
|
||||
bash automl_setup_thin_client_linux.sh
|
||||
```
|
||||
|
||||
### 4. Running configuration.ipynb
|
||||
- Before running any samples, you first need to run the configuration notebook. Open the [configuration](../../configuration.ipynb) notebook.
|
||||
- Execute the cells in the notebook to register the Machine Learning Services resource provider and create a workspace (*instructions are in the notebook*).
|
||||
|
||||
### 5. Running Samples
|
||||
- Please make sure you use the Python [conda env:azure_automl_experimental] kernel when trying the sample notebooks.
|
||||
- Follow the instructions in the individual notebooks to explore various features in automated ML.
|
||||
|
||||
### 6. Starting jupyter notebook manually
|
||||
To start your Jupyter notebook manually, use:
|
||||
|
||||
```
|
||||
conda activate azure_automl_experimental
|
||||
jupyter notebook
|
||||
```
|
||||
|
||||
or on Mac or Linux:
|
||||
|
||||
```
|
||||
source activate azure_automl_experimental
|
||||
jupyter notebook
|
||||
```
|
||||
|
||||
|
||||
<a name="samples"></a>
|
||||
# Automated ML SDK Sample Notebooks
|
||||
|
||||
- [auto-ml-regression-model-proxy.ipynb](regression-model-proxy/auto-ml-regression-model-proxy.ipynb)
|
||||
- Dataset: Hardware Performance Dataset
|
||||
- Simple example of using automated ML for regression
|
||||
- Uses azure compute for training
|
||||
- Uses ModelProxy for submitting prediction to training environment on azure compute
|
||||
|
||||
<a name="documentation"></a>
|
||||
See [Configure automated machine learning experiments](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-auto-train) to learn more about the settings and features available for automated machine learning experiments.
|
||||
|
||||
<a name="pythoncommand"></a>
|
||||
# Running using python command
|
||||
Jupyter notebook provides a File / Download as / Python (.py) option for saving the notebook as a Python file.
|
||||
You can then run this file using the python command.
|
||||
However, on Windows the file needs to be modified before it can be run.
|
||||
The following condition must be added to the main code in the file:
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
The main code of the file must be indented so that it is under this condition.
|
||||
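For illustration, a downloaded notebook script reworked as described; the file name and cell contents are hypothetical:

```
# notebook_export.py (hypothetical file produced by Download as / Python)

def main():
    # the notebook's cell code, moved under the __main__ guard
    print("running the notebook code")


if __name__ == "__main__":
    main()
```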
@@ -0,0 +1,346 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Automated Machine Learning - Codegen for AutoFeaturization \n",
|
||||
"_**Autofeaturization of credit card fraudulent transactions dataset on remote compute and codegen functionality**_\n",
|
||||
"\n",
|
||||
"## Contents\n",
|
||||
"1. [Introduction](#Introduction)\n",
|
||||
"1. [Setup](#Setup)\n",
|
||||
"1. [Data](#Data)\n",
|
||||
"1. [Autofeaturization](#Autofeaturization)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id='Introduction'></a>\n",
|
||||
"## Introduction"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Autofeaturization** lets you run an AutoML experiment to only featurize the datasets. These datasets along with the transformer are stored in AML Storage and linked to the run which can later be retrieved and used to train models. \n",
|
||||
"\n",
|
||||
"**To run Autofeaturization, set the number of iterations to zero and featurization as auto.**\n",
|
||||
"\n",
|
||||
"Please refer to [Autofeaturization and custom model training](../autofeaturization-custom-model-training/custom-model-training-from-autofeaturization-run.ipynb) for more details on the same.\n",
|
||||
"\n",
|
||||
"[Codegen](https://github.com/Azure/automl-codegen-preview) is a feature, which when enabled, provides a user with the script of the underlying functionality and a notebook to tweak inputs or code and rerun the same.\n",
|
||||
"\n",
|
||||
"In this example we use the credit card fraudulent transactions dataset to showcase how you can use AutoML for autofeaturization and further how you can enable the `Codegen` feature.\n",
|
||||
"\n",
|
||||
"This notebook is using remote compute to complete the featurization.\n",
|
||||
"\n",
|
||||
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../configuration.ipynb) notebook first if you haven't already, to establish your connection to the AzureML Workspace. \n",
|
||||
"\n",
|
||||
"Here you will learn how to create an autofeaturization experiment using an existing workspace with codegen feature enabled."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id='Setup'></a>\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"import pandas as pd\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core.experiment import Experiment\n",
|
||||
"from azureml.core.workspace import Workspace\n",
|
||||
"from azureml.core.dataset import Dataset\n",
|
||||
"from azureml.train.automl import AutoMLConfig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.59.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# choose a name for experiment\n",
|
||||
"experiment_name = 'automl-autofeaturization-ccard-codegen-remote'\n",
|
||||
"\n",
|
||||
"experiment=Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Experiment Name'] = experiment.name\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create or Attach existing AmlCompute\n",
|
||||
"A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||
"\n",
|
||||
"# Choose a name for your CPU cluster\n",
|
||||
"cpu_cluster_name = \"cpu-codegen\"\n",
|
||||
"\n",
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
||||
" max_nodes=6)\n",
|
||||
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id='Data'></a>\n",
|
||||
"## Data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load Data\n",
|
||||
"\n",
|
||||
"Load the credit card fraudulent transactions dataset from a CSV file, containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. \n",
|
||||
"\n",
|
||||
"Here the autofeaturization run will featurize the training data passed in."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"##### Training Dataset"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard_train.csv\"\n",
|
||||
"training_dataset = Dataset.Tabular.from_delimited_files(training_data) # Tabular dataset\n",
|
||||
"\n",
|
||||
"label_column_name = 'Class' # output label"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id='Autofeaturization'></a>\n",
|
||||
"## AutoFeaturization\n",
|
||||
"\n",
|
||||
"Instantiate an AutoMLConfig object. This defines the settings and data used to run the autofeaturization experiment.\n",
|
||||
"\n",
|
||||
"|Property|Description|\n",
|
||||
"|-|-|\n",
|
||||
"|**task**|classification or regression or forecasting|\n",
|
||||
"|**training_data**|Input training dataset, containing both features and label column.|\n",
|
||||
"|**iterations**|For an autofeaturization run, iterations will be 0.|\n",
|
||||
"|**featurization**|For an autofeaturization run, featurization can be 'auto' or 'custom'.|\n",
|
||||
"|**label_column_name**|The name of the label column.|\n",
|
||||
"|**enable_code_generation**|For enabling codegen for the run, value would be True|\n",
|
||||
"\n",
|
||||
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||
" debug_log = 'automl_errors.log',\n",
|
||||
" iterations = 0, # autofeaturization run can be triggered by setting iterations to 0\n",
|
||||
" compute_target = compute_target,\n",
|
||||
" training_data = training_dataset,\n",
|
||||
" label_column_name = label_column_name,\n",
|
||||
" featurization = 'auto',\n",
|
||||
" verbosity = logging.INFO,\n",
|
||||
" enable_code_generation = True # enable codegen\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run = experiment.submit(automl_config, show_output = False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Widget for Monitoring Runs\n",
|
||||
"\n",
|
||||
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
|
||||
"\n",
|
||||
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.widgets import RunDetails\n",
|
||||
"RunDetails(remote_run).show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Codegen Script and Notebook"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Codegen script and notebook can be found under the `Outputs + logs` section from the details page of the remote run. Please check for the `autofeaturization_notebook.ipynb` under `/outputs/generated_code`. To modify the featurization code, open `script.py` and make changes. The codegen notebook can be run with the same environment configuration as the above AutoML run."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Experiment Complete!"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "bhavanatumma"
|
||||
}
|
||||
],
|
||||
"interpreter": {
|
||||
"hash": "adb464b67752e4577e3dc163235ced27038d19b7d88def00d75d1975bde5d9ab"
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.8 - AzureML",
|
||||
"language": "python",
|
||||
"name": "python38-azureml"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
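As a convenience, the generated artifacts described in the notebook above can also be pulled down programmatically; a sketch, assuming `remote_run` is the completed autofeaturization run and using the SDK's standard `Run.download_files`:

```
# Download the generated script and notebook from the run's artifacts.
remote_run.download_files(
    prefix="outputs/generated_code",
    output_directory="./generated_code",
)
# Expect script.py and autofeaturization_notebook.ipynb under ./generated_code.
```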
@@ -0,0 +1,729 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Automated Machine Learning - AutoFeaturization (Part 1)\n",
|
||||
"_**Autofeaturization of credit card fraudulent transactions dataset on remote compute**_\n",
|
||||
"\n",
|
||||
"## Contents\n",
|
||||
"1. [Introduction](#Introduction)\n",
|
||||
"1. [Setup](#Setup)\n",
|
||||
"1. [Data](#Data)\n",
|
||||
"1. [Autofeaturization](#Autofeaturization)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id='Introduction'></a>\n",
|
||||
"## Introduction"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Autofeaturization is a new feature to let you as the user run an AutoML experiment to only featurize the datasets. These datasets along with the transformer will be stored in the experiment which can later be retrieved and used to train models, either via AutoML or custom training. \n",
|
||||
"\n",
|
||||
"**To run Autofeaturization, pass in zero iterations and featurization as auto. This will featurize the datasets and terminate the experiment. Training will not occur.**\n",
|
||||
"\n",
|
||||
"*Limitations - Sparse data cannot be supported at the moment. Any dataset that has extensive categorical data might be featurized into sparse data which will not be allowed as input to AutoML. Efforts are underway to support sparse data and will be updated soon.* \n",
|
||||
"\n",
|
||||
"In this example we use the credit card fraudulent transactions dataset to showcase how you can use AutoML for autofeaturization. The goal is to clean and featurize the training dataset.\n",
|
||||
"\n",
|
||||
"This notebook is using remote compute to complete the featurization.\n",
|
||||
"\n",
|
||||
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../configuration.ipynb) notebook first if you haven't already, to establish your connection to the AzureML Workspace. \n",
|
||||
"\n",
|
||||
"In the below steps, you will learn how to:\n",
|
||||
"1. Create an autofeaturization experiment using an existing workspace.\n",
|
||||
"2. View the featurized datasets and transformer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id='Setup'></a>\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"import pandas as pd\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core.experiment import Experiment\n",
|
||||
"from azureml.core.workspace import Workspace\n",
|
||||
"from azureml.core.dataset import Dataset\n",
|
||||
"from azureml.train.automl import AutoMLConfig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.59.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# choose a name for experiment\n",
|
||||
"experiment_name = 'automl-autofeaturization-ccard-remote'\n",
|
||||
"\n",
|
||||
"experiment=Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Experiment Name'] = experiment.name\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create or Attach existing AmlCompute\n",
|
||||
"A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||
"\n",
|
||||
"# Choose a name for your CPU cluster\n",
|
||||
"cpu_cluster_name = \"cpu-cluster\"\n",
|
||||
"\n",
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
||||
" max_nodes=6)\n",
|
||||
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id='Data'></a>\n",
|
||||
"## Data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load Data\n",
|
||||
"\n",
|
||||
"Load the credit card fraudulent transactions dataset from a CSV file, containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. \n",
|
||||
"\n",
|
||||
"Here the autofeaturization run will featurize the training data passed in."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"##### Training Dataset"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard_train.csv\"\n",
|
||||
"training_dataset = Dataset.Tabular.from_delimited_files(training_data) # Tabular dataset\n",
|
||||
"\n",
|
||||
"label_column_name = 'Class' # output label"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id='Autofeaturization'></a>\n",
|
||||
"## AutoFeaturization\n",
|
||||
"\n",
|
||||
"Instantiate an AutoMLConfig object. This defines the settings and data used to run the autofeaturization experiment.\n",
|
||||
"\n",
|
||||
"|Property|Description|\n",
|
||||
"|-|-|\n",
|
||||
"|**task**|classification or regression|\n",
|
||||
"|**training_data**|Input training dataset, containing both features and label column.|\n",
|
||||
"|**iterations**|For an autofeaturization run, iterations will be 0.|\n",
|
||||
"|**featurization**|For an autofeaturization run, featurization will be 'auto'.|\n",
|
||||
"|**label_column_name**|The name of the label column.|\n",
|
||||
"\n",
|
||||
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||
" debug_log = 'automl_errors.log',\n",
|
||||
" iterations = 0, # autofeaturization run can be triggered by setting iterations to 0\n",
|
||||
" compute_target = compute_target,\n",
|
||||
" training_data = training_dataset,\n",
|
||||
" label_column_name = label_column_name,\n",
|
||||
" featurization = 'auto',\n",
|
||||
" verbosity = logging.INFO\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run = experiment.submit(automl_config, show_output = False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Transformer and Featurized Datasets\n",
|
||||
"The given datasets have been featurized and stored under `Outputs + logs` from the details page of the remote run. The structure is shown below. The featurized dataset is stored under `/outputs/featurization/data` and the transformer is saved under `/outputs/featurization/pipeline` \n",
|
||||
"\n",
|
||||
"Below you will learn how to refer to the data saved in your run and retrieve the same."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Widget for Monitoring Runs\n",
|
||||
"\n",
|
||||
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
|
||||
"\n",
|
||||
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.widgets import RunDetails\n",
|
||||
"RunDetails(remote_run).show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Automated Machine Learning - AutoFeaturization (Part 2)\n",
|
||||
"_**Training using a custom model with the featurized data from Autofeaturization run of credit card fraudulent transactions dataset**_\n",
|
||||
"\n",
|
||||
"## Contents\n",
|
||||
"1. [Introduction](#Introduction)\n",
|
||||
"1. [Data Setup](#DataSetup)\n",
|
||||
"1. [Autofeaturization Data](#AutofeaturizationData)\n",
|
||||
"1. [Train](#Train)\n",
|
||||
"1. [Results](#Results)\n",
|
||||
"1. [Test](#Test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id='Introduction'></a>\n",
|
||||
"## Introduction\n",
|
||||
"\n",
|
||||
"Here we use the featurized dataset saved in the above run to showcase how you can perform custom training by using the transformer from an autofeaturization run to transform validation / test datasets. \n",
|
||||
"\n",
|
||||
"The goal is to use autofeaturized run data and transformer to transform and run a custom training experiment independently\n",
|
||||
"\n",
|
||||
"In the below steps, you will learn how to:\n",
|
||||
"1. Read transformer from a completed autofeaturization run and transform data\n",
|
||||
"2. Pull featurized data from a completed autofeaturization run\n",
|
||||
"3. Run a custom training experiment with the above data\n",
|
||||
"4. Check results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id='DataSetup'></a>\n",
|
||||
"## Data Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We will load the featurized training data and also load the transformer from the above autofeaturized run. This transformer can then be used to transform the test data to check the accuracy of the custom model after training."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load Test Data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"load test dataset from CSV and split into X and y columns to featurize with the transformer going forward."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"test_data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard_test.csv\"\n",
|
||||
"\n",
|
||||
"test_dataset = pd.read_csv(test_data)\n",
|
||||
"label_column_name = 'Class'\n",
|
||||
"\n",
|
||||
"X_test_data = test_dataset[test_dataset.columns.difference([label_column_name])]\n",
|
||||
"y_test_data = test_dataset[label_column_name].values\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load data_transformer from the above remote run artifact"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### (Method 1)\n",
|
||||
"\n",
|
||||
"Method 1 allows you to read the transformer from the remote storage."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import mlflow\n",
|
||||
"mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())\n",
|
||||
"\n",
|
||||
"# Set uri to fetch data transformer from remote parent run.\n",
|
||||
"artifact_path = \"/outputs/featurization/pipeline/\"\n",
|
||||
"uri = \"runs:/\" + remote_run.id + artifact_path\n",
|
||||
"\n",
|
||||
"print(uri)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### (Method 2)\n",
|
||||
"\n",
|
||||
"Method 2 downloads the transformer to the local directory and then can be used to transform the data. Uncomment to use."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"''' import pathlib\n",
|
||||
"\n",
|
||||
"# Download the transformer to the local directory\n",
|
||||
"transformers_file_path = \"/outputs/featurization/pipeline/\"\n",
|
||||
"local_path = \"./transformer\"\n",
|
||||
"remote_run.download_files(prefix=transformers_file_path, output_directory=local_path, batch_size=500)\n",
|
||||
"\n",
|
||||
"path = pathlib.Path(\"transformer\") \n",
|
||||
"path = str(path.absolute()) + transformers_file_path\n",
|
||||
"str_uri = \"file:///\" + path\n",
|
||||
"\n",
|
||||
"print(str_uri) '''"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Transform Data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Note:** Not all datasets produce a y_transformer. The dataset used in the current notebook requires a transformer as the y column data is categorical. \n",
|
||||
"\n",
|
||||
"We will go ahead and download the mlflow transformer model and use it to transform test data that can be used for further experimentation below. To run the commented code, make sure the environment requirement is satisfied. You can go ahead and create the environment from the `conda.yaml` file under `/outputs/featurization/pipeline/` and run the given code in it."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"''' from azureml.automl.core.shared.constants import Transformers\n",
|
||||
"\n",
|
||||
"transformers = mlflow.sklearn.load_model(uri) # Using method 1\n",
|
||||
"data_transformers = transformers.get_transformers()\n",
|
||||
"x_transformer = data_transformers[Transformers.X_TRANSFORMER]\n",
|
||||
"y_transformer = data_transformers[Transformers.Y_TRANSFORMER]\n",
|
||||
"\n",
|
||||
"X_test = x_transformer.transform(X_test_data)\n",
|
||||
"y_test = y_transformer.transform(y_test_data) '''"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Run the following cell to see the featurization summary of X and y transformers. Uncomment to use. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"''' X_data_summary = x_transformer.get_featurization_summary(is_user_friendly=False)\n",
|
||||
"\n",
|
||||
"summary_df = pd.DataFrame.from_records(X_data_summary)\n",
|
||||
"summary_df '''"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load Datastore\n",
|
||||
"\n",
|
||||
"The below data store holds the featurized datasets, hence we load and access the data. Check the path and file names according to the saved structure in your experiment `Outputs + logs` as seen in <i>Autofeaturization Part 1</i>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.datastore import Datastore\n",
|
||||
"\n",
|
||||
"ds = Datastore.get(ws, \"workspaceartifactstore\")\n",
|
||||
"experiment_loc = \"ExperimentRun/dcid.\" + remote_run.id\n",
|
||||
"\n",
|
||||
"remote_data_path = \"/outputs/featurization/data/\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id='AutofeaturizationData'></a>\n",
|
||||
"## Autofeaturization Data\n",
|
||||
"\n",
|
||||
"We will load the training data from the previously completed Autofeaturization experiment. The resulting featurized dataframe can be passed into the custom model for training. Here we are saving the file to local from the experiment storage and reading the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"train_data_file_path = \"full_training_dataset.df.parquet\"\n",
|
||||
"local_data_path = \"./data/\" + train_data_file_path\n",
|
||||
"\n",
|
||||
"remote_run.download_file(remote_data_path + train_data_file_path, local_data_path)\n",
|
||||
"\n",
|
||||
"full_training_data = pd.read_parquet(local_data_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Another way to load the data is to go to the above autofeaturization experiment and check for the featurized dataset ids under `Output datasets`. Uncomment and replace them accordingly below, to use."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# train_data = Dataset.get_by_id(ws, 'cb4418ee-bac4-45ac-b055-600653bdf83a') # replace the featurized full_training_dataset id\n",
|
||||
"# full_training_data = train_data.to_pandas_dataframe()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Training Data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We are dropping the y column and weights column from the featurized training dataset."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"Y_COLUMN = \"automl_y\"\n",
|
||||
"SW_COLUMN = \"automl_weights\"\n",
|
||||
"\n",
|
||||
"X_train = full_training_data[full_training_data.columns.difference([Y_COLUMN, SW_COLUMN])]\n",
|
||||
"y_train = full_training_data[Y_COLUMN].values\n",
|
||||
"sample_weight = full_training_data[SW_COLUMN].values"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id='Train'></a>\n",
|
||||
"## Train"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here we are passing our training data to the lightgbm classifier, any custom model can be used with your data. Let us first install lightgbm."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install lightgbm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import lightgbm as lgb\n",
|
||||
"\n",
|
||||
"model = lgb.LGBMClassifier(learning_rate=0.08,max_depth=-5,random_state=42)\n",
|
||||
"model.fit(X_train, y_train, sample_weight=sample_weight)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Once training is done, the test data obtained after transforming from the above downloaded transformer can be used to calculate the accuracy "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print('Training accuracy {:.4f}'.format(model.score(X_train, y_train)))\n",
|
||||
"\n",
|
||||
"# Uncomment below to test the model on test data \n",
|
||||
"# print('Testing accuracy {:.4f}'.format(model.score(X_test, y_test)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id='Results'></a>\n",
|
||||
"## Analyze results\n",
|
||||
"\n",
|
||||
"### Retrieve the Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a id='Test'></a>\n",
|
||||
"## Test the fitted model\n",
|
||||
"\n",
|
||||
"Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Uncomment below to test the model on test data\n",
|
||||
"# y_pred = model.predict(X_test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Experiment Complete!"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "bhavanatumma"
|
||||
}
|
||||
],
|
||||
"interpreter": {
|
||||
"hash": "adb464b67752e4577e3dc163235ced27038d19b7d88def00d75d1975bde5d9ab"
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.8 - AzureML",
|
||||
"language": "python",
|
||||
"name": "python38-azureml"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,63 @@
|
||||
@echo off
|
||||
set conda_env_name=%1
|
||||
set automl_env_file=%2
|
||||
set options=%3
|
||||
set PIP_NO_WARN_SCRIPT_LOCATION=0
|
||||
|
||||
IF "%conda_env_name%"=="" SET conda_env_name="azure_automl_experimental"
|
||||
IF "%automl_env_file%"=="" SET automl_env_file="automl_thin_client_env.yml"
|
||||
|
||||
IF NOT EXIST %automl_env_file% GOTO YmlMissing
|
||||
|
||||
IF "%CONDA_EXE%"=="" GOTO CondaMissing
|
||||
|
||||
call conda activate %conda_env_name% 2>nul:
|
||||
|
||||
if not errorlevel 1 (
|
||||
echo Upgrading existing conda environment %conda_env_name%
|
||||
call pip uninstall azureml-train-automl -y -q
|
||||
call conda env update --name %conda_env_name% --file %automl_env_file%
|
||||
if errorlevel 1 goto ErrorExit
|
||||
) else (
|
||||
call conda env create -f %automl_env_file% -n %conda_env_name%
|
||||
)
|
||||
|
||||
call conda activate %conda_env_name% 2>nul:
|
||||
if errorlevel 1 goto ErrorExit
|
||||
|
||||
call python -m ipykernel install --user --name %conda_env_name% --display-name "Python (%conda_env_name%)"
|
||||
|
||||
REM azureml.widgets is now installed as part of the pip install under the conda env.
|
||||
REM Removing the old user install so that the notebooks will use the latest widget.
|
||||
call jupyter nbextension uninstall --user --py azureml.widgets
|
||||
|
||||
echo.
|
||||
echo.
|
||||
echo ***************************************
|
||||
echo * AutoML setup completed successfully *
|
||||
echo ***************************************
|
||||
IF NOT "%options%"=="nolaunch" (
|
||||
echo.
|
||||
echo Starting jupyter notebook - please run the configuration notebook
|
||||
echo.
|
||||
jupyter notebook --log-level=50 --notebook-dir='..\..'
|
||||
)
|
||||
|
||||
goto End
|
||||
|
||||
:CondaMissing
|
||||
echo Please run this script from an Anaconda Prompt window.
|
||||
echo You can start an Anaconda Prompt window by
|
||||
echo typing Anaconda Prompt on the Start menu.
|
||||
echo If you don't see the Anaconda Prompt app, install Miniconda.
|
||||
echo If you are running an older version of Miniconda or Anaconda,
|
||||
echo you can upgrade using the command: conda update conda
|
||||
goto End
|
||||
|
||||
:YmlMissing
|
||||
echo File %automl_env_file% not found.
|
||||
|
||||
:ErrorExit
|
||||
echo Install failed
|
||||
|
||||
:End
|
||||
@@ -0,0 +1,53 @@
|
||||
#!/bin/bash
|
||||
|
||||
CONDA_ENV_NAME=$1
|
||||
AUTOML_ENV_FILE=$2
|
||||
OPTIONS=$3
|
||||
PIP_NO_WARN_SCRIPT_LOCATION=0
|
||||
|
||||
if [ "$CONDA_ENV_NAME" == "" ]
|
||||
then
|
||||
CONDA_ENV_NAME="azure_automl_experimental"
|
||||
fi
|
||||
|
||||
if [ "$AUTOML_ENV_FILE" == "" ]
|
||||
then
|
||||
AUTOML_ENV_FILE="automl_thin_client_env.yml"
|
||||
fi
|
||||
|
||||
if [ ! -f $AUTOML_ENV_FILE ]; then
|
||||
echo "File $AUTOML_ENV_FILE not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if source activate $CONDA_ENV_NAME 2> /dev/null
|
||||
then
|
||||
echo "Upgrading existing conda environment" $CONDA_ENV_NAME
|
||||
pip uninstall azureml-train-automl -y -q
|
||||
conda env update --name $CONDA_ENV_NAME --file $AUTOML_ENV_FILE &&
|
||||
jupyter nbextension uninstall --user --py azureml.widgets
|
||||
else
|
||||
conda env create -f $AUTOML_ENV_FILE -n $CONDA_ENV_NAME &&
|
||||
source activate $CONDA_ENV_NAME &&
|
||||
python -m ipykernel install --user --name $CONDA_ENV_NAME --display-name "Python ($CONDA_ENV_NAME)" &&
|
||||
jupyter nbextension uninstall --user --py azureml.widgets &&
|
||||
echo "" &&
|
||||
echo "" &&
|
||||
echo "***************************************" &&
|
||||
echo "* AutoML setup completed successfully *" &&
|
||||
echo "***************************************" &&
|
||||
if [ "$OPTIONS" != "nolaunch" ]
|
||||
then
|
||||
echo "" &&
|
||||
echo "Starting jupyter notebook - please run the configuration notebook" &&
|
||||
echo "" &&
|
||||
jupyter notebook --log-level=50 --notebook-dir '../..'
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $? -gt 0 ]
|
||||
then
|
||||
echo "Installation failed"
|
||||
fi
|
||||
|
||||
|
||||
@@ -0,0 +1,55 @@
|
||||
#!/bin/bash
|
||||
|
||||
CONDA_ENV_NAME=$1
|
||||
AUTOML_ENV_FILE=$2
|
||||
OPTIONS=$3
|
||||
PIP_NO_WARN_SCRIPT_LOCATION=0
|
||||
|
||||
if [ "$CONDA_ENV_NAME" == "" ]
|
||||
then
|
||||
CONDA_ENV_NAME="azure_automl_experimental"
|
||||
fi
|
||||
|
||||
if [ "$AUTOML_ENV_FILE" == "" ]
|
||||
then
|
||||
AUTOML_ENV_FILE="automl_thin_client_env_mac.yml"
|
||||
fi
|
||||
|
||||
if [ ! -f $AUTOML_ENV_FILE ]; then
|
||||
echo "File $AUTOML_ENV_FILE not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if source activate $CONDA_ENV_NAME 2> /dev/null
|
||||
then
|
||||
echo "Upgrading existing conda environment" $CONDA_ENV_NAME
|
||||
pip uninstall azureml-train-automl -y -q
|
||||
conda env update --name $CONDA_ENV_NAME --file $AUTOML_ENV_FILE &&
|
||||
jupyter nbextension uninstall --user --py azureml.widgets
|
||||
else
|
||||
conda env create -f $AUTOML_ENV_FILE -n $CONDA_ENV_NAME &&
|
||||
source activate $CONDA_ENV_NAME &&
|
||||
conda install lightgbm -c conda-forge -y &&
|
||||
python -m ipykernel install --user --name $CONDA_ENV_NAME --display-name "Python ($CONDA_ENV_NAME)" &&
|
||||
jupyter nbextension uninstall --user --py azureml.widgets &&
|
||||
echo "" &&
|
||||
echo "" &&
|
||||
echo "***************************************" &&
|
||||
echo "* AutoML setup completed successfully *" &&
|
||||
echo "***************************************" &&
|
||||
if [ "$OPTIONS" != "nolaunch" ]
|
||||
then
|
||||
echo "" &&
|
||||
echo "Starting jupyter notebook - please run the configuration notebook" &&
|
||||
echo "" &&
|
||||
jupyter notebook --log-level=50 --notebook-dir '../..'
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $? -gt 0 ]
|
||||
then
|
||||
echo "Installation failed"
|
||||
fi
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,15 @@
|
||||
name: azure_automl_experimental
|
||||
dependencies:
|
||||
# The python interpreter version.
|
||||
# Currently Azure ML only supports 3.7.0 and later.
|
||||
- pip<=22.3.1
|
||||
- python>=3.7.0,<3.11
|
||||
|
||||
- pip:
|
||||
# Required packages for AzureML execution, history, and data preparation.
|
||||
- azureml-defaults
|
||||
- azureml-sdk
|
||||
- azureml-widgets
|
||||
- azureml-mlflow
|
||||
- pandas
|
||||
- mlflow
|
||||
@@ -0,0 +1,24 @@
|
||||
name: azure_automl_experimental
|
||||
channels:
|
||||
- conda-forge
|
||||
- main
|
||||
dependencies:
|
||||
# The python interpreter version.
|
||||
# Currently Azure ML only supports 3.7.0 and later.
|
||||
- pip<=20.2.4
|
||||
- nomkl
|
||||
- python>=3.7.0,<3.11
|
||||
- urllib3==1.26.7
|
||||
- PyJWT < 2.0.0
|
||||
- numpy>=1.21.6,<=1.22.3
|
||||
|
||||
- pip:
|
||||
# Required packages for AzureML execution, history, and data preparation.
|
||||
- azure-core==1.24.1
|
||||
- azure-identity==1.7.0
|
||||
- azureml-defaults
|
||||
- azureml-sdk
|
||||
- azureml-widgets
|
||||
- azureml-mlflow
|
||||
- pandas
|
||||
- mlflow
|
||||
@@ -0,0 +1,470 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Automated Machine Learning\n",
|
||||
"_**Regression with Aml Compute**_\n",
|
||||
"\n",
|
||||
"## Contents\n",
|
||||
"1. [Introduction](#Introduction)\n",
|
||||
"1. [Setup](#Setup)\n",
|
||||
"1. [Data](#Data)\n",
|
||||
"1. [Train](#Train)\n",
|
||||
"1. [Results](#Results)\n",
|
||||
"1. [Test](#Test)\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Introduction\n",
|
||||
"In this example we use an experimental feature, Model Proxy, to do a predict on the best generated model without downloading the model locally. The prediction will happen on same compute and environment that was used to train the model. This feature is currently in the experimental state, which means that the API is prone to changing, please make sure to run on the latest version of this notebook if you face any issues.\n",
|
||||
"This notebook will also leverage MLFlow for saving models, allowing for more portability of the resulting models. See https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-mlflow for more details around MLFlow is AzureML.\n",
|
||||
"\n",
|
||||
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
|
||||
"\n",
|
||||
"In this notebook you will learn how to:\n",
|
||||
"1. Create an `Experiment` in an existing `Workspace`.\n",
|
||||
"2. Configure AutoML using `AutoMLConfig`.\n",
|
||||
"3. Train the model using remote compute.\n",
|
||||
"4. Explore the results.\n",
|
||||
"5. Test the best fitted model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"import json\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core.experiment import Experiment\n",
|
||||
"from azureml.core.workspace import Workspace\n",
|
||||
"from azureml.core.dataset import Dataset\n",
|
||||
"from azureml.train.automl import AutoMLConfig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.59.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# Choose a name for the experiment.\n",
|
||||
"experiment_name = 'automl-regression-model-proxy'\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Run History Name'] = experiment_name\n",
|
||||
"output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using AmlCompute\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||
"\n",
|
||||
"# Choose a name for your CPU cluster\n",
|
||||
"# Try to ensure that the cluster name is unique across the notebooks\n",
|
||||
"cpu_cluster_name = \"reg-model-proxy\"\n",
|
||||
"\n",
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
||||
" max_nodes=4)\n",
|
||||
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Data\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load Data\n",
|
||||
"Load the hardware dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/machineData.csv\"\n",
|
||||
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
|
||||
"\n",
|
||||
"# Split the dataset into train and test datasets\n",
|
||||
"train_data, test_data = dataset.random_split(percentage=0.8, seed=223)\n",
|
||||
"\n",
|
||||
"label = \"ERP\"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The split data will be used in the remote compute by ModelProxy and locally to compare results.\n",
|
||||
"So, we need to persist the split data to avoid descrepencies from different package versions in the local and remote."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ds = ws.get_default_datastore()\n",
|
||||
"\n",
|
||||
"train_data = Dataset.Tabular.register_pandas_dataframe(\n",
|
||||
" train_data.to_pandas_dataframe(), target=(ds, \"machineTrainData\"), name=\"train_data\")\n",
|
||||
"\n",
|
||||
"test_data = Dataset.Tabular.register_pandas_dataframe(\n",
|
||||
" test_data.to_pandas_dataframe(), target=(ds, \"machineTestData\"), name=\"test_data\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train\n",
|
||||
"\n",
|
||||
"Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.\n",
|
||||
"\n",
|
||||
"|Property|Description|\n",
|
||||
"|-|-|\n",
|
||||
"|**task**|classification, regression or forecasting|\n",
|
||||
"|**primary_metric**|This is the metric that you want to optimize. Regression supports the following primary metrics: <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>|\n",
|
||||
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
||||
"|**training_data**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||
"|**label_column_name**|(sparse) array-like, shape = [n_samples, ], targets values.|\n",
|
||||
"\n",
|
||||
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"automlconfig-remarks-sample"
|
||||
]
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_settings = {\n",
|
||||
" \"n_cross_validations\": 3,\n",
|
||||
" \"primary_metric\": 'r2_score',\n",
|
||||
" \"enable_early_stopping\": True, \n",
|
||||
" \"experiment_timeout_hours\": 0.3, #for real scenarios we recommend a timeout of at least one hour \n",
|
||||
" \"max_concurrent_iterations\": 4,\n",
|
||||
" \"max_cores_per_iteration\": -1,\n",
|
||||
" \"verbosity\": logging.INFO,\n",
|
||||
" \"save_mlflow\": True,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task = 'regression',\n",
|
||||
" compute_target = compute_target,\n",
|
||||
" training_data = train_data,\n",
|
||||
" label_column_name = label,\n",
|
||||
" **automl_settings\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Call the `submit` method on the experiment object and pass the run configuration. Execution of remote runs is asynchronous. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run = experiment.submit(automl_config, show_output = False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# If you need to retrieve a run that already started, use the following code\n",
|
||||
"#from azureml.train.automl.run import AutoMLRun\n",
|
||||
"#remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retrieve the Best Child Run\n",
|
||||
"\n",
|
||||
"Below we select the best pipeline from our iterations. The `get_best_child` method returns the best run. Overloads on `get_best_child` allow you to retrieve the best run for *any* logged metric."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"best_run = remote_run.get_best_child()\n",
|
||||
"print(best_run)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Show hyperparameters\n",
|
||||
"Show the model pipeline used for the best run with its hyperparameters.\n",
|
||||
"For ensemble pipelines it shows the iterations and algorithms that are ensembled."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"run_properties = best_run.get_details()['properties']\n",
|
||||
"pipeline_script = json.loads(run_properties['pipeline_script'])\n",
|
||||
"print(json.dumps(pipeline_script, indent = 1)) \n",
|
||||
"\n",
|
||||
"if 'ensembled_iterations' in run_properties:\n",
|
||||
" print(\"\")\n",
|
||||
" print(\"Ensembled Iterations\")\n",
|
||||
" print(run_properties['ensembled_iterations'])\n",
|
||||
" \n",
|
||||
"if 'ensembled_algorithms' in run_properties:\n",
|
||||
" print(\"\")\n",
|
||||
" print(\"Ensembled Algorithms\")\n",
|
||||
" print(run_properties['ensembled_algorithms'])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Best Child Run Based on Any Other Metric\n",
|
||||
"Show the run and the model that has the smallest `root_mean_squared_error` value (which turned out to be the same as the one with largest `spearman_correlation` value):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"lookup_metric = \"root_mean_squared_error\"\n",
|
||||
"best_run = remote_run.get_best_child(metric = lookup_metric)\n",
|
||||
"print(best_run)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_test = test_data.keep_columns('ERP')\n",
|
||||
"test_data = test_data.drop_columns('ERP')\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"y_train = train_data.keep_columns('ERP')\n",
|
||||
"train_data = train_data.drop_columns('ERP')\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Creating ModelProxy for submitting prediction runs to the training environment.\n",
|
||||
"We will create a ModelProxy for the best child run, which will allow us to submit a run that does the prediction in the training environment. Unlike the local client, which can have different versions of some libraries, the training environment will have all the compatible libraries for the model already."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.train.automl.model_proxy import ModelProxy\n",
|
||||
"best_model_proxy = ModelProxy(best_run)\n",
|
||||
"y_pred_train = best_model_proxy.predict(train_data)\n",
|
||||
"y_pred_test = best_model_proxy.predict(test_data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Exploring results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_pred_train = y_pred_train.to_pandas_dataframe().values.flatten()\n",
|
||||
"y_train = y_train.to_pandas_dataframe().values.flatten()\n",
|
||||
"y_residual_train = y_train - y_pred_train\n",
|
||||
"\n",
|
||||
"y_pred_test = y_pred_test.to_pandas_dataframe().values.flatten()\n",
|
||||
"y_test = y_test.to_pandas_dataframe().values.flatten()\n",
|
||||
"y_residual_test = y_test - y_pred_test\n",
|
||||
"print(y_residual_train)\n",
|
||||
"print(y_residual_test)"
|
||||
]
|
||||
},
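Beyond printing the raw residuals, a couple of summary statistics make the train/test comparison easier to read. A minimal sketch using only numpy (illustrative; not part of the original notebook):

import numpy as np

def summarize_residuals(residuals, name):
    # Mean absolute error and root mean squared error from raw residuals.
    mae = np.mean(np.abs(residuals))
    rmse = np.sqrt(np.mean(residuals ** 2))
    print('{}: MAE={:.3f}, RMSE={:.3f}'.format(name, mae, rmse))

summarize_residuals(y_residual_train, "train")
summarize_residuals(y_residual_test, "test")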
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "sekrupa"
|
||||
}
|
||||
],
|
||||
"categories": [
|
||||
"how-to-use-azureml",
|
||||
"automated-machine-learning"
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.8 - AzureML",
|
||||
"language": "python",
|
||||
"name": "python38-azureml"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
@@ -0,0 +1,174 @@
|
||||
from typing import Any, Dict, Optional, List
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
from matplotlib import pyplot as plt
|
||||
from matplotlib.backends.backend_pdf import PdfPages
|
||||
|
||||
from azureml.automl.core.shared import constants
|
||||
from azureml.automl.core.shared.types import GrainType
|
||||
from azureml.automl.runtime.shared.score import scoring
|
||||
|
||||
GRAIN = "time_series_id"
|
||||
BACKTEST_ITER = "backtest_iteration"
|
||||
ACTUALS = "actual_level"
|
||||
PREDICTIONS = "predicted_level"
|
||||
ALL_GRAINS = "all_sets"
|
||||
|
||||
FORECASTS_FILE = "forecast.csv"
|
||||
SCORES_FILE = "scores.csv"
|
||||
PLOTS_FILE = "plots_fcst_vs_actual.pdf"
|
||||
RE_INVALID_SYMBOLS = re.compile("[: ]")
|
||||
|
||||
|
||||
def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
|
||||
"""
|
||||
Compute metrics for one data frame.
|
||||
|
||||
:param df: The data frame which contains actual_level and predicted_level columns.
:param metrics: The list of metric names to compute.
|
||||
:return: The data frame with two columns - metric_name and metric.
|
||||
"""
|
||||
scores = scoring.score_regression(
|
||||
y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
|
||||
)
|
||||
metrics_df = pd.DataFrame(list(scores.items()), columns=["metric_name", "metric"])
|
||||
metrics_df.sort_values(["metric_name"], inplace=True)
|
||||
metrics_df.reset_index(drop=True, inplace=True)
|
||||
return metrics_df
|
||||
|
||||
|
||||
def _format_grain_name(grain: GrainType) -> str:
|
||||
"""
|
||||
Convert grain name to string.
|
||||
|
||||
:param grain: the grain name.
|
||||
:return: the string representation of the given grain.
|
||||
"""
|
||||
if not isinstance(grain, tuple) and not isinstance(grain, list):
|
||||
return str(grain)
|
||||
grain = list(map(str, grain))
|
||||
return "|".join(grain)
|
||||
|
||||
|
||||
def compute_all_metrics(
|
||||
fcst_df: pd.DataFrame,
|
||||
ts_id_colnames: List[str],
|
||||
metric_names: Optional[List[str]] = None,
|
||||
):
|
||||
"""
|
||||
Calculate metrics per grain.
|
||||
|
||||
:param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'
|
||||
:param metric_names: (optional) the list of metric names to return
|
||||
:param ts_id_colnames: list of grain column names (pass an empty list if there are no grains)
|
||||
:return: a data frame with the metrics per grain plus an overall "all_sets" row
|
||||
"""
|
||||
if not metric_names:
|
||||
metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)
|
||||
|
||||
if ts_id_colnames is None:
|
||||
ts_id_colnames = []
|
||||
|
||||
metrics_list = []
|
||||
if ts_id_colnames:
|
||||
for grain, df in fcst_df.groupby(ts_id_colnames):
|
||||
one_grain_metrics_df = _compute_metrics(df, metric_names)
|
||||
one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
|
||||
metrics_list.append(one_grain_metrics_df)
|
||||
|
||||
# overall metrics
|
||||
one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
|
||||
one_grain_metrics_df[GRAIN] = ALL_GRAINS
|
||||
metrics_list.append(one_grain_metrics_df)
|
||||
|
||||
# collect into a data frame
|
||||
return pd.concat(metrics_list)
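# Illustrative usage (not part of the original script; depends on the
# AzureML scoring runtime being installed):
# fcst = pd.DataFrame({
#     "time_series_id": ["a", "a", "b", "b"],
#     "actual_level": [1.0, 2.0, 3.0, 4.0],
#     "predicted_level": [1.1, 1.9, 3.2, 3.8],
# })
# compute_all_metrics(fcst, ["time_series_id"]) returns one metrics block
# per series plus an "all_sets" block with the overall metrics.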
|
||||
|
||||
|
||||
def _draw_one_plot(
|
||||
df: pd.DataFrame,
|
||||
time_column_name: str,
|
||||
grain_column_names: List[str],
|
||||
pdf: PdfPages,
|
||||
) -> None:
|
||||
"""
|
||||
Draw the single plot.
|
||||
|
||||
:param df: The data frame with the data to build plot.
|
||||
:param time_column_name: The name of a time column.
|
||||
:param grain_column_names: The name of grain columns.
|
||||
:param pdf: The pdf backend used to render the plot.
|
||||
"""
|
||||
fig, _ = plt.subplots(figsize=(20, 10))
|
||||
df = df.set_index(time_column_name)
|
||||
plt.plot(df[[ACTUALS, PREDICTIONS]])
|
||||
plt.xticks(rotation=45)
|
||||
iteration = df[BACKTEST_ITER].iloc[0]
|
||||
if grain_column_names:
|
||||
grain_name = [df[grain].iloc[0] for grain in grain_column_names]
|
||||
plt.title(f"Time series ID: {_format_grain_name(grain_name)} {iteration}")
|
||||
plt.legend(["actual", "forecast"])
|
||||
pdf.savefig(fig)
|
||||
plt.close(fig)
|
||||
|
||||
|
||||
def calculate_scores_and_build_plots(
|
||||
input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
|
||||
):
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
grains = automl_settings.get(
|
||||
constants.TimeSeries.TIME_SERIES_ID_COLUMN_NAMES,
|
||||
automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES, None),
|
||||
)
|
||||
time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
|
||||
if grains is None:
|
||||
grains = []
|
||||
if isinstance(grains, str):
|
||||
grains = [grains]
|
||||
while BACKTEST_ITER in grains:
|
||||
grains.remove(BACKTEST_ITER)
|
||||
|
||||
dfs = []
|
||||
for fle in os.listdir(input_dir):
|
||||
file_path = os.path.join(input_dir, fle)
|
||||
if os.path.isfile(file_path) and file_path.endswith(".csv"):
|
||||
df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
|
||||
for _, iteration in df_iter.groupby(BACKTEST_ITER):
|
||||
dfs.append(iteration)
|
||||
forecast_df = pd.concat(dfs, sort=False, ignore_index=True)
|
||||
# To make sure plots are in order, sort the predictions by grain and iteration.
|
||||
ts_index = grains + [BACKTEST_ITER]
|
||||
forecast_df.sort_values(by=ts_index, inplace=True)
|
||||
pdf = PdfPages(os.path.join(output_dir, PLOTS_FILE))
|
||||
for _, one_forecast in forecast_df.groupby(ts_index):
|
||||
_draw_one_plot(one_forecast, time_column_name, grains, pdf)
|
||||
pdf.close()
|
||||
forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
|
||||
# Remove np.NaN and np.inf from the prediction and actuals data.
|
||||
forecast_df.replace([np.inf, -np.inf], np.nan, inplace=True)
|
||||
forecast_df.dropna(subset=[ACTUALS, PREDICTIONS], inplace=True)
|
||||
metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
|
||||
metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = {"forecasts": "--forecasts", "scores_out": "--output-dir"}
|
||||
parser = argparse.ArgumentParser("Parsing input arguments.")
|
||||
for argname, arg in args.items():
|
||||
parser.add_argument(arg, dest=argname, required=True)
|
||||
parsed_args, _ = parser.parse_known_args()
|
||||
input_dir = parsed_args.forecasts
|
||||
output_dir = parsed_args.scores_out
|
||||
with open(
|
||||
os.path.join(
|
||||
os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
|
||||
)
|
||||
) as json_file:
|
||||
automl_settings = json.load(json_file)
|
||||
calculate_scores_and_build_plots(input_dir, output_dir, automl_settings)
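# Illustrative invocation (the script name and paths are placeholders):
#   python <this_script>.py --forecasts ./forecast_csvs --output-dir ./scores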
|
||||
@@ -0,0 +1,779 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Many Models with Backtesting - Automated ML\n",
|
||||
"**_Backtest many models time series forecasts with Automated Machine Learning_**\n",
|
||||
"\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For this notebook we are using a synthetic dataset to demonstrate the back testing in many model scenario. This allows us to check historical performance of AutoML on a historical data. To do that we step back on the backtesting period by the data set several times and split the data to train and test sets. Then these data sets are used for training and evaluation of model.<br>\n",
|
||||
"\n",
|
||||
"Thus, it is a quick way of evaluating AutoML as if it was in production. Here, we do not test historical performance of a particular model, for this see the [notebook](../forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb). Instead, the best model for every backtest iteration can be different since AutoML chooses the best model for a given training set.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prerequisites\n",
|
||||
"You'll need to create a compute Instance by following [these](https://learn.microsoft.com/en-us/azure/machine-learning/v1/how-to-create-manage-compute-instance?tabs=python) instructions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1.0 Set up workspace, datastore, experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613003526897
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace, Datastore\n",
|
||||
"import numpy as np\n",
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"from pandas.tseries.frequencies import to_offset\n",
|
||||
"\n",
|
||||
"# Set up your workspace\n",
|
||||
"ws = Workspace.from_config()\n",
|
||||
"ws.get_details()\n",
|
||||
"\n",
|
||||
"# Set up your datastores\n",
|
||||
"dstore = ws.get_default_datastore()\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output[\"SDK version\"] = azureml.core.VERSION\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"Default datastore name\"] = dstore.name\n",
|
||||
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook is compatible with Azure ML SDK version 1.35.1 or later."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Choose an experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613003540729
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Experiment\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, \"automl-many-models-backtest\")\n",
|
||||
"\n",
|
||||
"print(\"Experiment name: \" + experiment.name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2.0 Data\n",
|
||||
"\n",
|
||||
"#### 2.1 Data generation\n",
|
||||
"For this notebook we will generate the artificial data set with two [time series IDs](https://docs.microsoft.com/en-us/python/api/azureml-automl-core/azureml.automl.core.forecasting_parameters.forecastingparameters?view=azure-ml-py). Then we will generate backtest folds and will upload it to the default BLOB storage and create a [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# simulate data: 2 grains - 700\n",
|
||||
"TIME_COLNAME = \"date\"\n",
|
||||
"TARGET_COLNAME = \"value\"\n",
|
||||
"TIME_SERIES_ID_COLNAME = \"ts_id\"\n",
|
||||
"\n",
|
||||
"sample_size = 700\n",
|
||||
"# Set the random seed for reproducibility of results.\n",
|
||||
"np.random.seed(20)\n",
|
||||
"X1 = pd.DataFrame(\n",
|
||||
" {\n",
|
||||
" TIME_COLNAME: pd.date_range(start=\"2018-01-01\", periods=sample_size),\n",
|
||||
" TARGET_COLNAME: np.random.normal(loc=100, scale=20, size=sample_size),\n",
|
||||
" TIME_SERIES_ID_COLNAME: \"ts_A\",\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
"X2 = pd.DataFrame(\n",
|
||||
" {\n",
|
||||
" TIME_COLNAME: pd.date_range(start=\"2018-01-01\", periods=sample_size),\n",
|
||||
" TARGET_COLNAME: np.random.normal(loc=100, scale=20, size=sample_size),\n",
|
||||
" TIME_SERIES_ID_COLNAME: \"ts_B\",\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"X = pd.concat([X1, X2], ignore_index=True, sort=False)\n",
|
||||
"print(\"Simulated dataset contains {} rows \\n\".format(X.shape[0]))\n",
|
||||
"X.head()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we will generate 8 backtesting folds with backtesting period of 7 days and with the same forecasting horizon. We will add the column \"backtest_iteration\", which will identify the backtesting period by the last training date."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"offset_type = \"7D\"\n",
|
||||
"NUMBER_OF_BACKTESTS = 8 # number of train/test sets to generate\n",
|
||||
"\n",
|
||||
"dfs_train = []\n",
|
||||
"dfs_test = []\n",
|
||||
"for ts_id, df_one in X.groupby(TIME_SERIES_ID_COLNAME):\n",
|
||||
"\n",
|
||||
" data_end = df_one[TIME_COLNAME].max()\n",
|
||||
"\n",
|
||||
" for i in range(NUMBER_OF_BACKTESTS):\n",
|
||||
" train_cutoff_date = data_end - to_offset(offset_type)\n",
|
||||
" df_one = df_one.copy()\n",
|
||||
" df_one[\"backtest_iteration\"] = \"iteration_\" + str(train_cutoff_date)\n",
|
||||
" train = df_one[df_one[TIME_COLNAME] <= train_cutoff_date]\n",
|
||||
" test = df_one[\n",
|
||||
" (df_one[TIME_COLNAME] > train_cutoff_date)\n",
|
||||
" & (df_one[TIME_COLNAME] <= data_end)\n",
|
||||
" ]\n",
|
||||
" data_end = train[TIME_COLNAME].max()\n",
|
||||
" dfs_train.append(train)\n",
|
||||
" dfs_test.append(test)\n",
|
||||
"\n",
|
||||
"X_train = pd.concat(dfs_train, sort=False, ignore_index=True)\n",
|
||||
"X_test = pd.concat(dfs_test, sort=False, ignore_index=True)"
|
||||
]
|
||||
},
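As a quick sanity check (illustrative; not part of the original notebook), the generated folds can be inspected; each backtest iteration should contribute 7 test days per series:

# Count test rows per (iteration, series) pair and the number of train folds.
print(X_test.groupby(["backtest_iteration", TIME_SERIES_ID_COLNAME]).size())
print(X_train["backtest_iteration"].nunique(), "distinct train folds")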
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 2.2 Create the Tabular Data Set.\n",
|
||||
"\n",
|
||||
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
|
||||
"\n",
|
||||
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.\n",
|
||||
"\n",
|
||||
"In this next step, we will upload the data and create a TabularDataset."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||
"\n",
|
||||
"ds = ws.get_default_datastore()\n",
|
||||
"# Upload saved data to the default data store.\n",
|
||||
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||
" X_train, target=(ds, \"data_mm\"), name=\"data_train\"\n",
|
||||
")\n",
|
||||
"test_data = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||
" X_test, target=(ds, \"data_mm\"), name=\"data_test\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3.0 Build the training pipeline\n",
|
||||
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Choose a compute target\n",
|
||||
"\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
|
||||
"\n",
|
||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007037308
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"\n",
|
||||
"# Name your cluster\n",
|
||||
"compute_name = \"backtest-mm\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"if compute_name in ws.compute_targets:\n",
|
||||
" compute_target = ws.compute_targets[compute_name]\n",
|
||||
" if compute_target and type(compute_target) is AmlCompute:\n",
|
||||
" print(\"Found compute target: \" + compute_name)\n",
|
||||
"else:\n",
|
||||
" print(\"Creating a new compute target...\")\n",
|
||||
" provisioning_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
|
||||
" )\n",
|
||||
" # Create the compute target\n",
|
||||
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
|
||||
"\n",
|
||||
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
|
||||
" # If no min node count is provided it will use the scale settings for the cluster\n",
|
||||
" compute_target.wait_for_completion(\n",
|
||||
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # For a more detailed view of current cluster status, use the 'status' property\n",
|
||||
" print(compute_target.status.serialize())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up training parameters\n",
|
||||
"\n",
|
||||
"We need to provide ``ForecastingParameters``, ``AutoMLConfig`` and ``ManyModelsTrainParameters`` objects. For the forecasting task we also need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name(s) definition.\n",
|
||||
"\n",
|
||||
"#### ``ForecastingParameters`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
||||
"| **time_column_name** | The name of your time column. |\n",
|
||||
"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
|
||||
"| **cv_step_size** | Number of periods between two consecutive cross-validation folds. The default value is \\\"auto\\\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value. |\n",
|
||||
"\n",
|
||||
"#### ``AutoMLConfig`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **task** | forecasting |\n",
|
||||
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
|
||||
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
|
||||
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **experiment_timeout_hours** | Maximum amount of time in hours that each experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. **It does not control the overall timeout for the pipeline run, instead controls the timeout for each training run per partitioned time series.** |\n",
|
||||
"| **label_column_name** | The name of the label column. |\n",
|
||||
"| **n_cross_validations** | Number of cross validation splits. The default value is \\\"auto\\\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
||||
"| **enable_early_stopping** | Flag to enable early termination if the primary metric is no longer improving. |\n",
|
||||
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
|
||||
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
|
||||
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"#### ``ManyModelsTrainParameters`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **automl_settings** | The ``AutoMLConfig`` object defined above. |\n",
|
||||
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007061544
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
|
||||
" ManyModelsTrainParameters,\n",
|
||||
")\n",
|
||||
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||
"from azureml.train.automl.automlconfig import AutoMLConfig\n",
|
||||
"\n",
|
||||
"partition_column_names = [TIME_SERIES_ID_COLNAME, \"backtest_iteration\"]\n",
|
||||
"\n",
|
||||
"forecasting_parameters = ForecastingParameters(\n",
|
||||
" time_column_name=TIME_COLNAME,\n",
|
||||
" forecast_horizon=6,\n",
|
||||
" time_series_id_column_names=partition_column_names,\n",
|
||||
" cv_step_size=\"auto\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"automl_settings = AutoMLConfig(\n",
|
||||
" task=\"forecasting\",\n",
|
||||
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||
" iteration_timeout_minutes=10,\n",
|
||||
" iterations=15,\n",
|
||||
" experiment_timeout_hours=0.25,\n",
|
||||
" label_column_name=TARGET_COLNAME,\n",
|
||||
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||
" track_child_runs=False,\n",
|
||||
" forecasting_parameters=forecasting_parameters,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"mm_paramters = ManyModelsTrainParameters(\n",
|
||||
" automl_settings=automl_settings, partition_column_names=partition_column_names\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up many models pipeline"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
|
||||
"\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **experiment** | The experiment used for training. |\n",
|
||||
"| **train_data** | The file dataset to be used as input to the training run. |\n",
|
||||
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
|
||||
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. |\n",
|
||||
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
|
||||
"| **run_invocation_timeout** | Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. This must be greater than ``experiment_timeout_hours`` by at least 300 seconds. |\n",
|
||||
"\n",
|
||||
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.\n",
|
||||
"\n",
|
||||
"**Note**: Total time taken for the **training step** in the pipeline to complete = $ \\frac{t}{ p \\times n } \\times ts $\n",
|
||||
"where,\n",
|
||||
"- $ t $ is time taken for training one partition (can be viewed in the training logs)\n",
|
||||
"- $ p $ is ``process_count_per_node``\n",
|
||||
"- $ n $ is ``node_count``\n",
|
||||
"- $ ts $ is total number of partitions in time series based on ``partition_column_names``"
|
||||
]
|
||||
},
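For illustration (numbers are hypothetical): with $ t = 10 $ minutes per partition, $ p = 2 $ processes per node, $ n = 2 $ nodes and $ ts = 16 $ partitions, the training step takes roughly $ \frac{10}{2 \times 2} \times 16 = 40 $ minutes.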
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
|
||||
" experiment=experiment,\n",
|
||||
" train_data=train_data,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" node_count=2,\n",
|
||||
" process_count_per_node=2,\n",
|
||||
" run_invocation_timeout=1200,\n",
|
||||
" train_pipeline_parameters=mm_paramters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"\n",
|
||||
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Submit the pipeline to run\n",
|
||||
"Next we submit our pipeline to run. The whole training pipeline takes about 20 minutes using a STANDARD_DS12_V2 VM with our current ParallelRunConfig setting."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_run = experiment.submit(training_pipeline)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Check the run status, if training_run is in completed state, continue to next section. Otherwise, check the portal for failures."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 4.0 Backtesting\n",
|
||||
"Now that we selected the best AutoML model for each backtest fold, we will use these models to generate the forecasts and compare with the actuals."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up output dataset for inference data\n",
|
||||
"Output of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.data import OutputFileDatasetConfig\n",
|
||||
"\n",
|
||||
"output_inference_data_ds = OutputFileDatasetConfig(\n",
|
||||
" name=\"many_models_inference_output\",\n",
|
||||
" destination=(dstore, \"backtesting/inference_data/\"),\n",
|
||||
").register_on_complete(name=\"backtesting_data_ds\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For many models we need to provide the ManyModelsInferenceParameters object.\n",
|
||||
"\n",
|
||||
"#### ``ManyModelsInferenceParameters`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **partition_column_names** | List of column names that identifies groups. |\n",
|
||||
"| **target_column_name** | \\[Optional] Column name only if the inference dataset has the target. |\n",
|
||||
"| **time_column_name** | \\[Optional] Time column name only if it is timeseries. |\n",
|
||||
"| **inference_type** | \\[Optional] Which inference method to use on the model. Possible values are 'forecast', 'predict_proba', and 'predict'. |\n",
|
||||
"| **forecast_mode** | \\[Optional] The type of forecast to be used, either 'rolling' or 'recursive'; defaults to 'recursive'. |\n",
|
||||
"| **step** | \\[Optional] Number of periods to advance the forecasting window in each iteration **(for rolling forecast only)**; defaults to 1. |\n",
|
||||
"\n",
|
||||
"#### ``get_many_models_batch_inference_steps`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **experiment** | The experiment used for inference run. |\n",
|
||||
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
|
||||
"| **compute_target** | The compute target that runs the inference pipeline. |\n",
|
||||
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
|
||||
"| **process_count_per_node** | \\[Optional] The number of processes per node. By default it's 2 (should be at most half of the number of cores in a single node of the compute cluster that will be used for the experiment).\n",
|
||||
"| **inference_pipeline_parameters** | \\[Optional] The ``ManyModelsInferenceParameters`` object defined above. |\n",
|
||||
"| **append_row_file_name** | \\[Optional] The name of the output file (optional, default value is 'parallel_run_step.txt'). Supports 'txt' and 'csv' file extension. A 'txt' file extension generates the output in 'txt' format with space as separator without column names. A 'csv' file extension generates the output in 'csv' format with comma as separator and with column names. |\n",
|
||||
"| **train_run_id** | \\[Optional] The run id of the **training pipeline**. By default it is the latest successful training pipeline run in the experiment. |\n",
|
||||
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
|
||||
"| **run_invocation_timeout** | \\[Optional] Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **output_datastore** | \\[Optional] The ``Datastore`` or ``OutputDatasetConfig`` to be used for output. If specified any pipeline output will be written to that location. If unspecified the default datastore will be used. |\n",
|
||||
"| **arguments** | \\[Optional] Arguments to be passed to inference script. Possible argument is '--forecast_quantiles' followed by quantile values. |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
|
||||
" ManyModelsInferenceParameters,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"mm_parameters = ManyModelsInferenceParameters(\n",
|
||||
" partition_column_names=partition_column_names,\n",
|
||||
" time_column_name=TIME_COLNAME,\n",
|
||||
" target_column_name=TARGET_COLNAME,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"output_file_name = \"parallel_run_step.csv\"\n",
|
||||
"\n",
|
||||
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
|
||||
" experiment=experiment,\n",
|
||||
" inference_data=test_data,\n",
|
||||
" node_count=2,\n",
|
||||
" process_count_per_node=2,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" run_invocation_timeout=300,\n",
|
||||
" output_datastore=output_inference_data_ds,\n",
|
||||
" train_run_id=training_run.id,\n",
|
||||
" train_experiment_name=training_run.experiment.name,\n",
|
||||
" inference_pipeline_parameters=mm_parameters,\n",
|
||||
" append_row_file_name=output_file_name,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"\n",
|
||||
"inference_pipeline = Pipeline(ws, steps=inference_steps)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inference_run = experiment.submit(inference_pipeline)\n",
|
||||
"inference_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 5.0 Retrieve results and calculate metrics\n",
|
||||
"\n",
|
||||
"The pipeline returns one file with the predictions for each times series ID and outputs the result to the forecasting_output Blob container. The details of the blob container is listed in 'forecasting_output.txt' under Outputs+logs. \n",
|
||||
"\n",
|
||||
"The next code snippet does the following:\n",
|
||||
"1. Downloads the contents of the output folder that is passed in the parallel run step \n",
|
||||
"2. Reads the parallel_run_step.txt file that has the predictions as pandas dataframe \n",
|
||||
"3. Saves the table in csv format and \n",
|
||||
"4. Displays the top 10 rows of the predictions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
|
||||
"\n",
|
||||
"PREDICTION_COLNAME = \"Predictions\"\n",
|
||||
"forecasting_results_name = \"forecasting_results\"\n",
|
||||
"forecasting_output_name = \"many_models_inference_output\"\n",
|
||||
"forecast_file = get_output_from_mm_pipeline(\n",
|
||||
" inference_run, forecasting_results_name, forecasting_output_name, output_file_name\n",
|
||||
")\n",
|
||||
"df = pd.read_csv(forecast_file, parse_dates=[0])\n",
|
||||
"print(\n",
|
||||
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
|
||||
")\n",
|
||||
"# Save the csv file to read it in the next step.\n",
|
||||
"df.rename(\n",
|
||||
" columns={TARGET_COLNAME: \"actual_level\", PREDICTION_COLNAME: \"predicted_level\"},\n",
|
||||
" inplace=True,\n",
|
||||
")\n",
|
||||
"df.to_csv(os.path.join(forecasting_results_name, \"forecast.csv\"), index=False)\n",
|
||||
"df.head(10)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## View metrics\n",
|
||||
"We will read in the obtained results and run the helper script, which will generate metrics and create the plots of predicted versus actual values."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from assets.score import calculate_scores_and_build_plots\n",
|
||||
"\n",
|
||||
"backtesting_results = \"backtesting_mm_results\"\n",
|
||||
"os.makedirs(backtesting_results, exist_ok=True)\n",
|
||||
"calculate_scores_and_build_plots(\n",
|
||||
" forecasting_results_name,\n",
|
||||
" backtesting_results,\n",
|
||||
" automl_settings.as_serializable_dict(),\n",
|
||||
")\n",
|
||||
"pd.DataFrame({\"File\": os.listdir(backtesting_results)})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The directory contains a set of files with results:\n",
|
||||
"- forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix\n",
|
||||
"- scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series ids, which are marked as \"all_sets\"\n",
|
||||
"- plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and, eash time series is saved as separate plot.\n",
|
||||
"\n",
|
||||
"For demonstration purposes we will display the table of metrics for one of the time series with ID \"ts0\". We will create the utility function, which will build the table with metrics."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_metrics_for_ts(all_metrics, ts):\n",
|
||||
" \"\"\"\n",
|
||||
" Get the metrics for the time series with ID ts and return it as pandas data frame.\n",
|
||||
"\n",
|
||||
" :param all_metrics: The table with all the metrics.\n",
|
||||
" :param ts: The ID of a time series of interest.\n",
|
||||
" :return: The pandas DataFrame with metrics for one time series.\n",
|
||||
" \"\"\"\n",
|
||||
" results_df = None\n",
|
||||
" for ts_id, one_series in all_metrics.groupby(\"time_series_id\"):\n",
|
||||
" if not ts_id.startswith(ts):\n",
|
||||
" continue\n",
|
||||
" iteration = ts_id.split(\"|\")[-1]\n",
|
||||
" df = one_series[[\"metric_name\", \"metric\"]]\n",
|
||||
" df.rename({\"metric\": iteration}, axis=1, inplace=True)\n",
|
||||
" df.set_index(\"metric_name\", inplace=True)\n",
|
||||
" if results_df is None:\n",
|
||||
" results_df = df\n",
|
||||
" else:\n",
|
||||
" results_df = results_df.merge(\n",
|
||||
" df, how=\"inner\", left_index=True, right_index=True\n",
|
||||
" )\n",
|
||||
" results_df.sort_index(axis=1, inplace=True)\n",
|
||||
" return results_df\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"metrics_df = pd.read_csv(os.path.join(backtesting_results, \"scores.csv\"))\n",
|
||||
"ts = \"ts_A\"\n",
|
||||
"get_metrics_for_ts(metrics_df, ts)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Forecast vs actuals plots."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import IFrame\n",
|
||||
"\n",
|
||||
"IFrame(\"./backtesting_mm_results/plots_fcst_vs_actual.pdf\", width=800, height=300)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "jialiu"
|
||||
}
|
||||
],
|
||||
"categories": [
|
||||
"how-to-use-azureml",
|
||||
"automated-machine-learning"
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.8 - AzureML",
|
||||
"language": "python",
|
||||
"name": "python38-azureml"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.5"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
[image file added: 22 KiB]
@@ -0,0 +1,45 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import pandas as pd
|
||||
|
||||
import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru
|
||||
|
||||
from azureml.core import Run
|
||||
from azureml.core.dataset import Dataset
|
||||
|
||||
# Parse the arguments.
|
||||
args = {
|
||||
"step_size": "--step-size",
|
||||
"step_number": "--step-number",
|
||||
"time_column_name": "--time-column-name",
|
||||
"time_series_id_column_names": "--time-series-id-column-names",
|
||||
"out_dir": "--output-dir",
|
||||
}
|
||||
parser = argparse.ArgumentParser("Parsing input arguments.")
|
||||
for argname, arg in args.items():
|
||||
parser.add_argument(arg, dest=argname, required=True)
|
||||
parsed_args, _ = parser.parse_known_args()
|
||||
step_number = int(parsed_args.step_number)
|
||||
step_size = int(parsed_args.step_size)
|
||||
# Create the working directory to store the temporary CSV files.
|
||||
working_dir = parsed_args.out_dir
|
||||
os.makedirs(working_dir, exist_ok=True)
|
||||
# Set input and output
|
||||
script_run = Run.get_context()
|
||||
input_dataset = script_run.input_datasets["training_data"]
|
||||
X_train = input_dataset.to_pandas_dataframe()
|
||||
# Split the data.
|
||||
for i in range(step_number):
|
||||
file_name = os.path.join(working_dir, "backtest_{}.csv".format(i))
|
||||
if parsed_args.time_series_id_column_names:
|
||||
dfs = []
|
||||
for _, one_series in X_train.groupby([parsed_args.time_series_id_column_names]):
|
||||
one_series = one_series.sort_values(
|
||||
by=[parsed_args.time_column_name], inplace=False
|
||||
)
|
||||
dfs.append(one_series.iloc[: len(one_series) - step_size * i])
|
||||
pd.concat(dfs, sort=False, ignore_index=True).to_csv(file_name, index=False)
|
||||
else:
|
||||
X_train.sort_values(by=[parsed_args.time_column_name], inplace=True)
|
||||
X_train.iloc[: len(X_train) - step_size * i].to_csv(file_name, index=False)
|
||||
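A quick sanity check of the split logic above: fold i keeps all but the last step_size * i rows of each (sorted) series, so fold 0 is the full history and each later fold ends one backtesting period earlier. The following sketch reproduces that trimming on a toy frame; the column names and values are made up for the illustration and are not taken from the script.

import pandas as pd

# Toy series: ten daily observations for a single grain.
toy = pd.DataFrame(
    {
        "date": pd.date_range("2000-01-01", periods=10, freq="D"),
        "y": range(10),
    }
)
step_size, step_number = 2, 3
for i in range(step_number):
    fold = toy.sort_values("date").iloc[: len(toy) - step_size * i]
    # Fold 0 ends 2000-01-10, fold 1 ends 2000-01-08, fold 2 ends 2000-01-06.
    print(i, fold["date"].max().date(), len(fold))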
@@ -0,0 +1,178 @@
|
||||
# ---------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# ---------------------------------------------------------
|
||||
"""The batch script needed for back testing of models using PRS."""
|
||||
import argparse
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import pickle
|
||||
import re
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from azureml.core.experiment import Experiment
|
||||
from azureml.core.model import Model
|
||||
from azureml.core.run import Run
|
||||
from azureml.automl.core.shared import constants
|
||||
from azureml.automl.runtime.shared.score import scoring
|
||||
from azureml.train.automl import AutoMLConfig
|
||||
|
||||
RE_INVALID_SYMBOLS = re.compile(r"[:\s]")
|
||||
|
||||
model_name = None
|
||||
target_column_name = None
|
||||
current_step_run = None
|
||||
output_dir = None
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _get_automl_settings():
|
||||
with open(
|
||||
os.path.join(
|
||||
os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
|
||||
)
|
||||
) as json_file:
|
||||
return json.load(json_file)
|
||||
|
||||
|
||||
def init():
|
||||
global model_name
|
||||
global target_column_name
|
||||
global output_dir
|
||||
global automl_settings
|
||||
global model_uid
|
||||
global forecast_quantiles
|
||||
|
||||
logger.info("Initialization of the run.")
|
||||
parser = argparse.ArgumentParser("Parsing input arguments.")
|
||||
parser.add_argument("--output-dir", dest="out", required=True)
|
||||
parser.add_argument("--model-name", dest="model", default=None)
|
||||
parser.add_argument("--model-uid", dest="model_uid", default=None)
|
||||
parser.add_argument(
|
||||
"--forecast_quantiles",
|
||||
nargs="*",
|
||||
type=float,
|
||||
help="forecast quantiles list",
|
||||
default=None,
|
||||
)
|
||||
|
||||
parsed_args, _ = parser.parse_known_args()
|
||||
model_name = parsed_args.model
|
||||
automl_settings = _get_automl_settings()
|
||||
target_column_name = automl_settings.get("label_column_name")
|
||||
output_dir = parsed_args.out
|
||||
model_uid = parsed_args.model_uid
|
||||
forecast_quantiles = parsed_args.forecast_quantiles
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
os.environ["AUTOML_IGNORE_PACKAGE_VERSION_INCOMPATIBILITIES".lower()] = "True"
|
||||
|
||||
|
||||
def get_run():
|
||||
global current_step_run
|
||||
if current_step_run is None:
|
||||
current_step_run = Run.get_context()
|
||||
return current_step_run
|
||||
|
||||
|
||||
def run_backtest(data_input_name: str, file_name: str, experiment: Experiment):
|
||||
"""Re-train the model and return metrics."""
|
||||
data_input = pd.read_csv(
|
||||
data_input_name,
|
||||
parse_dates=[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]],
|
||||
)
|
||||
print(data_input.head())
|
||||
if not automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES):
|
||||
        # There are no time series ID columns (grains).
|
||||
data_input.sort_values(
|
||||
[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]], inplace=True
|
||||
)
|
||||
X_train = data_input.iloc[: -automl_settings["max_horizon"]]
|
||||
y_train = X_train.pop(target_column_name).values
|
||||
X_test = data_input.iloc[-automl_settings["max_horizon"] :]
|
||||
y_test = X_test.pop(target_column_name).values
|
||||
else:
|
||||
# The data contain grains.
|
||||
dfs_train = []
|
||||
dfs_test = []
|
||||
for _, one_series in data_input.groupby(
|
||||
automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
|
||||
):
|
||||
one_series.sort_values(
|
||||
[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]], inplace=True
|
||||
)
|
||||
dfs_train.append(one_series.iloc[: -automl_settings["max_horizon"]])
|
||||
dfs_test.append(one_series.iloc[-automl_settings["max_horizon"] :])
|
||||
X_train = pd.concat(dfs_train, sort=False, ignore_index=True)
|
||||
y_train = X_train.pop(target_column_name).values
|
||||
X_test = pd.concat(dfs_test, sort=False, ignore_index=True)
|
||||
y_test = X_test.pop(target_column_name).values
|
||||
|
||||
last_training_date = str(
|
||||
X_train[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]].max()
|
||||
)
|
||||
|
||||
if file_name:
|
||||
        # If a file name is provided, we will load the model and retrain it on the backtest data.
|
||||
with open(file_name, "rb") as fp:
|
||||
fitted_model = pickle.load(fp)
|
||||
fitted_model.fit(X_train, y_train)
|
||||
else:
|
||||
# We will run the experiment and select the best model.
|
||||
X_train[target_column_name] = y_train
|
||||
automl_config = AutoMLConfig(training_data=X_train, **automl_settings)
|
||||
automl_run = current_step_run.submit_child(automl_config, show_output=True)
|
||||
best_run, fitted_model = automl_run.get_output()
|
||||
        # As we have generated models, we need to register them for future use.
|
||||
description = "Backtest model example"
|
||||
tags = {"last_training_date": last_training_date, "experiment": experiment.name}
|
||||
if model_uid:
|
||||
tags["model_uid"] = model_uid
|
||||
automl_run.register_model(
|
||||
model_name=best_run.properties["model_name"],
|
||||
description=description,
|
||||
tags=tags,
|
||||
)
|
||||
print(f"The model {best_run.properties['model_name']} was registered.")
|
||||
|
||||
    # The 0.5 quantile is the point forecast (our target), so make sure it is always included.
|
||||
if forecast_quantiles:
|
||||
if 0.5 not in forecast_quantiles:
|
||||
forecast_quantiles.append(0.5)
|
||||
fitted_model.quantiles = forecast_quantiles
|
||||
|
||||
x_pred = fitted_model.forecast_quantiles(X_test)
|
||||
x_pred["actual_level"] = y_test
|
||||
x_pred["backtest_iteration"] = f"iteration_{last_training_date}"
|
||||
x_pred.rename({0.5: "predicted_level"}, axis=1, inplace=True)
|
||||
date_safe = RE_INVALID_SYMBOLS.sub("_", last_training_date)
|
||||
|
||||
x_pred.to_csv(os.path.join(output_dir, f"iteration_{date_safe}.csv"), index=False)
|
||||
return x_pred
|
||||
|
||||
|
||||
def run(input_files):
|
||||
"""Run the script"""
|
||||
logger.info("Running mini batch.")
|
||||
ws = get_run().experiment.workspace
|
||||
file_name = None
|
||||
if model_name:
|
||||
models = Model.list(ws, name=model_name)
|
||||
cloud_model = None
|
||||
if models:
|
||||
for one_mod in models:
|
||||
if cloud_model is None or one_mod.version > cloud_model.version:
|
||||
logger.info(
|
||||
"Using existing model from the workspace. Model version: {}".format(
|
||||
one_mod.version
|
||||
)
|
||||
)
|
||||
cloud_model = one_mod
|
||||
file_name = cloud_model.download(exist_ok=True)
|
||||
|
||||
forecasts = []
|
||||
logger.info("Running backtest.")
|
||||
for input_file in input_files:
|
||||
forecasts.append(run_backtest(input_file, file_name, get_run().experiment))
|
||||
return pd.concat(forecasts)
|
||||
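One detail of run_backtest above worth calling out: forecast_quantiles() returns a frame whose columns are labeled with the float quantile values, and 0.5 is forced into the requested list so the rename to predicted_level always finds its column. A minimal sketch of that rename step, with made-up numbers:

import pandas as pd

# Assumed shape of a forecast_quantiles() result when [0.025, 0.5, 0.975] are requested.
x_pred = pd.DataFrame({0.025: [9.1], 0.5: [10.0], 0.975: [11.2]})
x_pred.rename({0.5: "predicted_level"}, axis=1, inplace=True)
print(list(x_pred.columns))  # [0.025, 'predicted_level', 0.975]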
@@ -0,0 +1,171 @@
|
||||
from typing import Any, Dict, Optional, List
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
from matplotlib import pyplot as plt
|
||||
from matplotlib.backends.backend_pdf import PdfPages
|
||||
|
||||
from azureml.automl.core.shared import constants
|
||||
from azureml.automl.core.shared.types import GrainType
|
||||
from azureml.automl.runtime.shared.score import scoring
|
||||
|
||||
GRAIN = "time_series_id"
|
||||
BACKTEST_ITER = "backtest_iteration"
|
||||
ACTUALS = "actual_level"
|
||||
PREDICTIONS = "predicted_level"
|
||||
ALL_GRAINS = "all_sets"
|
||||
|
||||
FORECASTS_FILE = "forecast.csv"
|
||||
SCORES_FILE = "scores.csv"
|
||||
PLOTS_FILE = "plots_fcst_vs_actual.pdf"
|
||||
RE_INVALID_SYMBOLS = re.compile("[: ]")
|
||||
|
||||
|
||||
def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
|
||||
"""
|
||||
Compute metrics for one data frame.
|
||||
|
||||
:param df: The data frame which contains actual_level and predicted_level columns.
|
||||
:return: The data frame with two columns - metric_name and metric.
|
||||
"""
|
||||
scores = scoring.score_regression(
|
||||
y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
|
||||
)
|
||||
metrics_df = pd.DataFrame(list(scores.items()), columns=["metric_name", "metric"])
|
||||
metrics_df.sort_values(["metric_name"], inplace=True)
|
||||
metrics_df.reset_index(drop=True, inplace=True)
|
||||
return metrics_df
|
||||
|
||||
|
||||
def _format_grain_name(grain: GrainType) -> str:
|
||||
"""
|
||||
Convert grain name to string.
|
||||
|
||||
:param grain: the grain name.
|
||||
:return: the string representation of the given grain.
|
||||
"""
|
||||
if not isinstance(grain, tuple) and not isinstance(grain, list):
|
||||
return str(grain)
|
||||
grain = list(map(str, grain))
|
||||
return "|".join(grain)
|
||||
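# For illustration, with hypothetical grain values:
#   _format_grain_name("store1")           -> "store1"
#   _format_grain_name(("store1", "SKU9")) -> "store1|SKU9"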
|
||||
|
||||
def compute_all_metrics(
|
||||
fcst_df: pd.DataFrame,
|
||||
ts_id_colnames: List[str],
|
||||
    metric_names: Optional[List[str]] = None,
|
||||
):
|
||||
"""
|
||||
Calculate metrics per grain.
|
||||
|
||||
:param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'
|
||||
:param metric_names: (optional) the list of metric names to return
|
||||
    :param ts_id_colnames: the list of grain column names (may be empty)
|
||||
    :return: the data frame with metrics for each time series ID and for all series combined ("all_sets")
|
||||
"""
|
||||
if not metric_names:
|
||||
metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)
|
||||
|
||||
if ts_id_colnames is None:
|
||||
ts_id_colnames = []
|
||||
|
||||
metrics_list = []
|
||||
if ts_id_colnames:
|
||||
for grain, df in fcst_df.groupby(ts_id_colnames):
|
||||
one_grain_metrics_df = _compute_metrics(df, metric_names)
|
||||
one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
|
||||
metrics_list.append(one_grain_metrics_df)
|
||||
|
||||
# overall metrics
|
||||
one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
|
||||
one_grain_metrics_df[GRAIN] = ALL_GRAINS
|
||||
metrics_list.append(one_grain_metrics_df)
|
||||
|
||||
# collect into a data frame
|
||||
return pd.concat(metrics_list)
|
||||
|
||||
|
||||
def _draw_one_plot(
|
||||
df: pd.DataFrame,
|
||||
time_column_name: str,
|
||||
grain_column_names: List[str],
|
||||
pdf: PdfPages,
|
||||
) -> None:
|
||||
"""
|
||||
Draw the single plot.
|
||||
|
||||
:param df: The data frame with the data to build plot.
|
||||
:param time_column_name: The name of a time column.
|
||||
:param grain_column_names: The name of grain columns.
|
||||
:param pdf: The pdf backend used to render the plot.
|
||||
"""
|
||||
fig, _ = plt.subplots(figsize=(20, 10))
|
||||
df = df.set_index(time_column_name)
|
||||
plt.plot(df[[ACTUALS, PREDICTIONS]])
|
||||
plt.xticks(rotation=45)
|
||||
iteration = df[BACKTEST_ITER].iloc[0]
|
||||
if grain_column_names:
|
||||
grain_name = [df[grain].iloc[0] for grain in grain_column_names]
|
||||
plt.title(f"Time series ID: {_format_grain_name(grain_name)} {iteration}")
|
||||
plt.legend(["actual", "forecast"])
|
||||
    pdf.savefig(fig)
    plt.close(fig)
|
||||
|
||||
|
||||
def calculate_scores_and_build_plots(
|
||||
input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
|
||||
):
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
grains = automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
|
||||
time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
|
||||
if grains is None:
|
||||
grains = []
|
||||
if isinstance(grains, str):
|
||||
grains = [grains]
|
||||
while BACKTEST_ITER in grains:
|
||||
grains.remove(BACKTEST_ITER)
|
||||
|
||||
dfs = []
|
||||
for fle in os.listdir(input_dir):
|
||||
file_path = os.path.join(input_dir, fle)
|
||||
if os.path.isfile(file_path) and file_path.endswith(".csv"):
|
||||
df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
|
||||
for _, iteration in df_iter.groupby(BACKTEST_ITER):
|
||||
dfs.append(iteration)
|
||||
forecast_df = pd.concat(dfs, sort=False, ignore_index=True)
|
||||
# To make sure plots are in order, sort the predictions by grain and iteration.
|
||||
ts_index = grains + [BACKTEST_ITER]
|
||||
forecast_df.sort_values(by=ts_index, inplace=True)
|
||||
pdf = PdfPages(os.path.join(output_dir, PLOTS_FILE))
|
||||
for _, one_forecast in forecast_df.groupby(ts_index):
|
||||
_draw_one_plot(one_forecast, time_column_name, grains, pdf)
|
||||
pdf.close()
|
||||
forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
|
||||
# Remove np.NaN and np.inf from the prediction and actuals data.
|
||||
forecast_df.replace([np.inf, -np.inf], np.nan, inplace=True)
|
||||
forecast_df.dropna(subset=[ACTUALS, PREDICTIONS], inplace=True)
|
||||
metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
|
||||
metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = {"forecasts": "--forecasts", "scores_out": "--output-dir"}
|
||||
parser = argparse.ArgumentParser("Parsing input arguments.")
|
||||
for argname, arg in args.items():
|
||||
parser.add_argument(arg, dest=argname, required=True)
|
||||
parsed_args, _ = parser.parse_known_args()
|
||||
input_dir = parsed_args.forecasts
|
||||
output_dir = parsed_args.scores_out
|
||||
with open(
|
||||
os.path.join(
|
||||
os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
|
||||
)
|
||||
) as json_file:
|
||||
automl_settings = json.load(json_file)
|
||||
calculate_scores_and_build_plots(input_dir, output_dir, automl_settings)
|
||||
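To see the shape of what compute_all_metrics produces without the azureml scoring dependency, here is a dependency-free sketch that computes a single metric (RMSE) per time series ID plus the combined "all_sets" row; it mirrors the layout of scores.csv but uses only pandas and numpy, and RMSE stands in for the full AutoML regression metric set.

import numpy as np
import pandas as pd

def rmse_per_group(df: pd.DataFrame, group_cols: list) -> pd.DataFrame:
    """Per-grain RMSE plus an 'all_sets' row, mirroring compute_all_metrics."""
    def rmse(frame: pd.DataFrame) -> float:
        err = frame["actual_level"] - frame["predicted_level"]
        return float(np.sqrt((err ** 2).mean()))

    rows = [
        {"time_series_id": "|".join(map(str, np.atleast_1d(grain))),
         "metric_name": "root_mean_squared_error", "metric": rmse(group)}
        for grain, group in df.groupby(group_cols)
    ]
    rows.append({"time_series_id": "all_sets",
                 "metric_name": "root_mean_squared_error", "metric": rmse(df)})
    return pd.DataFrame(rows)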
@@ -0,0 +1,729 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License.\n",
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Automated MachineLearning\n",
|
||||
"_**The model backtesting**_\n",
|
||||
"\n",
|
||||
"## Contents\n",
|
||||
"1. [Introduction](#Introduction)\n",
|
||||
"2. [Setup](#Setup)\n",
|
||||
"3. [Data](#Data)\n",
|
||||
"4. [Prepare remote compute and data.](#prepare_remote)\n",
|
||||
"5. [Create the configuration for AutoML backtesting](#train)\n",
|
||||
"6. [Backtest AutoML](#backtest_automl)\n",
|
||||
"7. [View metrics](#Metrics)\n",
|
||||
"8. [Backtest the best model](#backtest_model)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Introduction\n",
|
||||
"Model backtesting is used to evaluate its performance on historical data. To do that we step back on the backtesting period by the data set several times and split the data to train and test sets. Then these data sets are used for training and evaluation of model.<br>\n",
|
||||
"This notebook is intended to demonstrate backtesting on a single model, this is the best solution for small data sets with a few or one time series in it. For scenarios where we would like to choose the best AutoML model for every backtest iteration, please see [AutoML Forecasting Backtest Many Models Example](../forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb) notebook.\n",
|
||||
"\n",
|
||||
"This notebook demonstrates two ways of backtesting:\n",
|
||||
"- AutoML backtesting: we will train separate AutoML models for historical data\n",
|
||||
"- Model backtesting: from the first run we will select the best model trained on the most recent data, retrain it on the past data and evaluate."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import numpy as np\n",
|
||||
"import pandas as pd\n",
|
||||
"import shutil\n",
|
||||
"\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Experiment, Model, Workspace"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook is compatible with Azure ML SDK version 1.35.1 or later."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As part of the setup you have already created a <b>Workspace</b>."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"SKU\"] = ws.sku\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Data\n",
|
||||
"For the demonstration purposes we will simulate one year of daily data. To do this we need to specify the following parameters: time column name, time series ID column names and label column name. Our intention is to forecast for two weeks ahead."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"TIME_COLUMN_NAME = \"date\"\n",
|
||||
"TIME_SERIES_ID_COLUMN_NAMES = \"time_series_id\"\n",
|
||||
"LABEL_COLUMN_NAME = \"y\"\n",
|
||||
"FORECAST_HORIZON = 14\n",
|
||||
"FREQUENCY = \"D\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def simulate_timeseries_data(\n",
|
||||
" train_len: int,\n",
|
||||
" test_len: int,\n",
|
||||
" time_column_name: str,\n",
|
||||
" target_column_name: str,\n",
|
||||
" time_series_id_column_name: str,\n",
|
||||
" time_series_number: int = 1,\n",
|
||||
" freq: str = \"H\",\n",
|
||||
"):\n",
|
||||
" \"\"\"\n",
|
||||
" Return the time series of designed length.\n",
|
||||
"\n",
|
||||
" :param train_len: The length of training data (one series).\n",
|
||||
" :type train_len: int\n",
|
||||
" :param test_len: The length of testing data (one series).\n",
|
||||
" :type test_len: int\n",
|
||||
" :param time_column_name: The desired name of a time column.\n",
|
||||
" :type time_column_name: str\n",
|
||||
" :param time_series_number: The number of time series in the data set.\n",
|
||||
" :type time_series_number: int\n",
|
||||
" :param freq: The frequency string representing pandas offset.\n",
|
||||
" see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n",
|
||||
" :type freq: str\n",
|
||||
" :returns: the tuple of train and test data sets.\n",
|
||||
" :rtype: tuple\n",
|
||||
"\n",
|
||||
" \"\"\"\n",
|
||||
" data_train = [] # type: List[pd.DataFrame]\n",
|
||||
" data_test = [] # type: List[pd.DataFrame]\n",
|
||||
" data_length = train_len + test_len\n",
|
||||
" for i in range(time_series_number):\n",
|
||||
" X = pd.DataFrame(\n",
|
||||
" {\n",
|
||||
" time_column_name: pd.date_range(\n",
|
||||
" start=\"2000-01-01\", periods=data_length, freq=freq\n",
|
||||
" ),\n",
|
||||
" target_column_name: np.arange(data_length).astype(float)\n",
|
||||
" + np.random.rand(data_length)\n",
|
||||
" + i * 5,\n",
|
||||
" \"ext_predictor\": np.asarray(range(42, 42 + data_length)),\n",
|
||||
" time_series_id_column_name: np.repeat(\"ts{}\".format(i), data_length),\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
" data_train.append(X[:train_len])\n",
|
||||
" data_test.append(X[train_len:])\n",
|
||||
" train = pd.concat(data_train)\n",
|
||||
" label_train = train.pop(target_column_name).values\n",
|
||||
" test = pd.concat(data_test)\n",
|
||||
" label_test = test.pop(target_column_name).values\n",
|
||||
" return train, label_train, test, label_test\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"n_test_periods = FORECAST_HORIZON\n",
|
||||
"n_train_periods = 365\n",
|
||||
"X_train, y_train, X_test, y_test = simulate_timeseries_data(\n",
|
||||
" train_len=n_train_periods,\n",
|
||||
" test_len=n_test_periods,\n",
|
||||
" time_column_name=TIME_COLUMN_NAME,\n",
|
||||
" target_column_name=LABEL_COLUMN_NAME,\n",
|
||||
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAMES,\n",
|
||||
" time_series_number=2,\n",
|
||||
" freq=FREQUENCY,\n",
|
||||
")\n",
|
||||
"X_train[LABEL_COLUMN_NAME] = y_train"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's see what the training data looks like."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"X_train.tail()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prepare remote compute and data. <a id=\"prepare_remote\"></a>\n",
|
||||
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace), is paired with the storage account, which contains the default data store. We will use it to upload the artificial data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||
"\n",
|
||||
"ds = ws.get_default_datastore()\n",
|
||||
"# Upload saved data to the default data store.\n",
|
||||
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||
" X_train, target=(ds, \"data\"), name=\"data_backtest\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You will need to create a compute target for backtesting. In this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute), you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||
"\n",
|
||||
"# Choose a name for your CPU cluster\n",
|
||||
"amlcompute_cluster_name = \"backtest-cluster\"\n",
|
||||
"\n",
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||
" print(\"Found existing cluster, use it.\")\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
|
||||
" )\n",
|
||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create the configuration for AutoML backtesting <a id=\"train\"></a>\n",
|
||||
"\n",
|
||||
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
|
||||
"\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **task** | forecasting |\n",
|
||||
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>normalized_root_mean_squared_error</i><br><i>normalized_mean_absolute_error</i> |\n",
|
||||
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **label_column_name** | The name of the label column. |\n",
|
||||
"| **max_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
||||
"| **n_cross_validations** | Number of cross validation splits. The default value is \"auto\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
||||
"|**cv_step_size**|Number of periods between two consecutive cross-validation folds. The default value is \"auto\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value.\n",
|
||||
"| **time_column_name** | The name of your time column. |\n",
|
||||
"| **grain_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_settings = {\n",
|
||||
" \"task\": \"forecasting\",\n",
|
||||
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
|
||||
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
|
||||
" \"iterations\": 15,\n",
|
||||
" \"experiment_timeout_hours\": 1, # This also needs to be changed based on the dataset. For larger data set this number needs to be bigger.\n",
|
||||
" \"label_column_name\": LABEL_COLUMN_NAME,\n",
|
||||
" \"n_cross_validations\": \"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||
" \"cv_step_size\": \"auto\",\n",
|
||||
" \"time_column_name\": TIME_COLUMN_NAME,\n",
|
||||
" \"max_horizon\": FORECAST_HORIZON,\n",
|
||||
" \"track_child_runs\": False,\n",
|
||||
" \"grain_column_names\": TIME_SERIES_ID_COLUMN_NAMES,\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Backtest AutoML <a id=\"backtest_automl\"></a>\n",
|
||||
"First we set backtesting parameters: we will step back by 30 days and will make 5 such steps; for each step we will forecast for next two weeks."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# The number of periods to step back on each backtest iteration.\n",
|
||||
"BACKTESTING_PERIOD = 30\n",
|
||||
"# The number of times we will back test the model.\n",
|
||||
"NUMBER_OF_BACKTESTS = 5"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To train AutoML on backtesting folds we will use the [Azure Machine Learning pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/concept-ml-pipelines). It will generate backtest folds, then train model for each of them and calculate the accuracy metrics. To run pipeline, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve (here, it is a forecasting), while a Run corresponds to a specific approach to the problem."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from uuid import uuid1\n",
|
||||
"\n",
|
||||
"from pipeline_helper import get_backtest_pipeline\n",
|
||||
"\n",
|
||||
"pipeline_exp = Experiment(ws, \"automl-backtesting\")\n",
|
||||
"\n",
|
||||
"# We will create the unique identifier to mark our models.\n",
|
||||
"model_uid = str(uuid1())\n",
|
||||
"\n",
|
||||
"pipeline = get_backtest_pipeline(\n",
|
||||
" experiment=pipeline_exp,\n",
|
||||
" dataset=train_data,\n",
|
||||
" # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.\n",
|
||||
" process_per_node=2,\n",
|
||||
" # The maximum number of nodes for our compute is 6.\n",
|
||||
" node_count=6,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" automl_settings=automl_settings,\n",
|
||||
" step_size=BACKTESTING_PERIOD,\n",
|
||||
" step_number=NUMBER_OF_BACKTESTS,\n",
|
||||
" model_uid=model_uid,\n",
|
||||
" forecast_quantiles=[0.025, 0.975], # Optional\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Run the pipeline and wait for results."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pipeline_run = pipeline_exp.submit(pipeline)\n",
|
||||
"pipeline_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"After the run is complete, we can download the results. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"metrics_output = pipeline_run.get_pipeline_output(\"results\")\n",
|
||||
"metrics_output.download(\"backtest_metrics\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## View metrics<a id=\"Metrics\"></a>\n",
|
||||
"To distinguish these metrics from the model backtest, which we will obtain in the next section, we will move the directory with metrics out of the backtest_metrics and will remove the parent folder. We will create the utility function for that."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def copy_scoring_directory(new_name):\n",
|
||||
" scores_path = os.path.join(\"backtest_metrics\", \"azureml\")\n",
|
||||
" directory_list = [os.path.join(scores_path, d) for d in os.listdir(scores_path)]\n",
|
||||
" latest_file = max(directory_list, key=os.path.getctime)\n",
|
||||
" print(\n",
|
||||
" f\"The output directory {latest_file} was created on {pd.Timestamp(os.path.getctime(latest_file), unit='s')} GMT.\"\n",
|
||||
" )\n",
|
||||
" shutil.move(os.path.join(latest_file, \"results\"), new_name)\n",
|
||||
" shutil.rmtree(\"backtest_metrics\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Move the directory and list its contents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"copy_scoring_directory(\"automl_backtest\")\n",
|
||||
"pd.DataFrame({\"File\": os.listdir(\"automl_backtest\")})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The directory contains a set of files with results:\n",
|
||||
"- forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix\n",
|
||||
"- scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series id are marked as \"all_sets\"\n",
|
||||
"- plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and time series.\n",
|
||||
"\n",
|
||||
"For demonstration purposes we will display the table of metrics for one of the time series with ID \"ts0\". Again, we will create the utility function, which will be re used in model backtesting."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_metrics_for_ts(all_metrics, ts):\n",
|
||||
" \"\"\"\n",
|
||||
" Get the metrics for the time series with ID ts and return it as pandas data frame.\n",
|
||||
"\n",
|
||||
" :param all_metrics: The table with all the metrics.\n",
|
||||
" :param ts: The ID of a time series of interest.\n",
|
||||
" :return: The pandas DataFrame with metrics for one time series.\n",
|
||||
" \"\"\"\n",
|
||||
" results_df = None\n",
|
||||
" for ts_id, one_series in all_metrics.groupby(\"time_series_id\"):\n",
|
||||
" if not ts_id.startswith(ts):\n",
|
||||
" continue\n",
|
||||
" iteration = ts_id.split(\"|\")[-1]\n",
|
||||
" df = one_series[[\"metric_name\", \"metric\"]]\n",
|
||||
" df.rename({\"metric\": iteration}, axis=1, inplace=True)\n",
|
||||
" df.set_index(\"metric_name\", inplace=True)\n",
|
||||
" if results_df is None:\n",
|
||||
" results_df = df\n",
|
||||
" else:\n",
|
||||
" results_df = results_df.merge(\n",
|
||||
" df, how=\"inner\", left_index=True, right_index=True\n",
|
||||
" )\n",
|
||||
" results_df.sort_index(axis=1, inplace=True)\n",
|
||||
" return results_df\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"metrics_df = pd.read_csv(os.path.join(\"automl_backtest\", \"scores.csv\"))\n",
|
||||
"ts_id = \"ts0\"\n",
|
||||
"get_metrics_for_ts(metrics_df, ts_id)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Forecast vs actuals plots."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import IFrame\n",
|
||||
"\n",
|
||||
"IFrame(\"./automl_backtest/plots_fcst_vs_actual.pdf\", width=800, height=300)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# <font color='blue'>Backtest the best model</font> <a id=\"backtest_model\"></a>\n",
|
||||
"\n",
|
||||
"For model backtesting we will use the same parameters we used to backtest AutoML. All the models, we have obtained in the previous run were registered in our workspace. To identify the model, each was assigned a tag with the last trainig date."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_list = Model.list(ws, tags=[[\"experiment\", \"automl-backtesting\"]])\n",
|
||||
"model_data = {\"name\": [], \"last_training_date\": []}\n",
|
||||
"for model in model_list:\n",
|
||||
" if (\n",
|
||||
" \"last_training_date\" not in model.tags\n",
|
||||
" or \"model_uid\" not in model.tags\n",
|
||||
" or model.tags[\"model_uid\"] != model_uid\n",
|
||||
" ):\n",
|
||||
" continue\n",
|
||||
" model_data[\"name\"].append(model.name)\n",
|
||||
" model_data[\"last_training_date\"].append(\n",
|
||||
" pd.Timestamp(model.tags[\"last_training_date\"])\n",
|
||||
" )\n",
|
||||
"df_models = pd.DataFrame(model_data)\n",
|
||||
"df_models.sort_values([\"last_training_date\"], inplace=True)\n",
|
||||
"df_models.reset_index(inplace=True, drop=True)\n",
|
||||
"df_models"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We will backtest the model trained on the most recet data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_name = df_models[\"name\"].iloc[-1]\n",
|
||||
"model_name"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retrain the models.\n",
|
||||
"Assemble the pipeline, which will retrain the best model from AutoML run on historical data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pipeline_exp = Experiment(ws, \"model-backtesting\")\n",
|
||||
"\n",
|
||||
"pipeline = get_backtest_pipeline(\n",
|
||||
" experiment=pipeline_exp,\n",
|
||||
" dataset=train_data,\n",
|
||||
" # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.\n",
|
||||
" process_per_node=2,\n",
|
||||
" # The maximum number of nodes for our compute is 6.\n",
|
||||
" node_count=6,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" automl_settings=automl_settings,\n",
|
||||
" step_size=BACKTESTING_PERIOD,\n",
|
||||
" step_number=NUMBER_OF_BACKTESTS,\n",
|
||||
" model_name=model_name,\n",
|
||||
" forecast_quantiles=[0.025, 0.975],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Launch the backtesting pipeline."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pipeline_run = pipeline_exp.submit(pipeline)\n",
|
||||
"pipeline_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The metrics are stored in the pipeline output named \"score\". The next code will download the table with metrics."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"metrics_output = pipeline_run.get_pipeline_output(\"results\")\n",
|
||||
"metrics_output.download(\"backtest_metrics\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Again, we will copy the data files from the downloaded directory, but in this case we will call the folder \"model_backtest\"; it will contain the same files as the one for AutoML backtesting."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"copy_scoring_directory(\"model_backtest\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally, we will display the metrics."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_metrics_df = pd.read_csv(os.path.join(\"model_backtest\", \"scores.csv\"))\n",
|
||||
"get_metrics_for_ts(model_metrics_df, ts_id)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Forecast vs actuals plots."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import IFrame\n",
|
||||
"\n",
|
||||
"IFrame(\"./model_backtest/plots_fcst_vs_actual.pdf\", width=800, height=300)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "jialiu"
|
||||
}
|
||||
],
|
||||
"category": "tutorial",
|
||||
"compute": [
|
||||
"Remote"
|
||||
],
|
||||
"datasets": [
|
||||
"None"
|
||||
],
|
||||
"deployment": [
|
||||
"None"
|
||||
],
|
||||
"exclude_from_index": false,
|
||||
"framework": [
|
||||
"Azure ML AutoML"
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.8 - AzureML",
|
||||
"language": "python",
|
||||
"name": "python38-azureml"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.5"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -0,0 +1,169 @@
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
import os
|
||||
|
||||
import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru
|
||||
|
||||
from azureml._restclient.jasmine_client import JasmineClient
|
||||
from azureml.contrib.automl.pipeline.steps import utilities
|
||||
from azureml.core import RunConfiguration
|
||||
from azureml.core.compute import ComputeTarget
|
||||
from azureml.core.experiment import Experiment
|
||||
from azureml.data import LinkTabularOutputDatasetConfig, TabularDataset
|
||||
from azureml.pipeline.core import Pipeline, PipelineData, PipelineParameter
|
||||
from azureml.pipeline.steps import ParallelRunConfig, ParallelRunStep, PythonScriptStep
|
||||
from azureml.train.automl.constants import Scenarios
|
||||
from azureml.data.dataset_consumption_config import DatasetConsumptionConfig
|
||||
|
||||
|
||||
PROJECT_FOLDER = "assets"
|
||||
SETTINGS_FILE = "automl_settings.json"
|
||||
|
||||
|
||||
def get_backtest_pipeline(
|
||||
experiment: Experiment,
|
||||
dataset: TabularDataset,
|
||||
process_per_node: int,
|
||||
node_count: int,
|
||||
compute_target: ComputeTarget,
|
||||
automl_settings: Dict[str, Any],
|
||||
step_size: int,
|
||||
step_number: int,
|
||||
model_name: Optional[str] = None,
|
||||
model_uid: Optional[str] = None,
|
||||
forecast_quantiles: Optional[list] = None,
|
||||
) -> Pipeline:
|
||||
"""
|
||||
:param experiment: The experiment used to run the pipeline.
|
||||
:param dataset: Tabular data set to be used for model training.
|
||||
:param process_per_node: The number of processes per node. Generally it should be the number of cores
|
||||
on the node divided by two.
|
||||
:param node_count: The number of nodes to be used.
|
||||
:param compute_target: The compute target to be used to run the pipeline.
|
||||
:param model_name: The name of a model to be back tested.
|
||||
:param automl_settings: The dictionary with automl settings.
|
||||
:param step_size: The number of periods to step back in backtesting.
|
||||
:param step_number: The number of backtesting iterations.
|
||||
:param model_uid: The uid to mark models from this run of the experiment.
|
||||
:param forecast_quantiles: The forecast quantiles that are required in the inference.
|
||||
:return: The pipeline to be used for model retraining.
|
||||
    **Note:** The output will be uploaded in the pipeline output
    called 'results'.
|
||||
"""
|
    jasmine_client = JasmineClient(
        service_context=experiment.workspace.service_context,
        experiment_name=experiment.name,
        experiment_id=experiment.id,
    )
    env = jasmine_client.get_curated_environment(
        scenario=Scenarios.AUTOML,
        enable_dnn=False,
        enable_gpu=False,
        compute=compute_target,
        compute_sku=experiment.workspace.compute_targets.get(
            compute_target.name
        ).vm_size,
    )
    data_results = PipelineData(
        name="results", datastore=None, pipeline_output_name="results"
    )
    ############################################################
    # Split the data set using a Python script.
    ############################################################
    run_config = RunConfiguration()
    run_config.docker.use_docker = True
    run_config.environment = env

    utilities.set_environment_variables_for_run(run_config)

    split_data = PipelineData(name="split_data_output", datastore=None).as_dataset()
    split_step = PythonScriptStep(
        name="split_data_for_backtest",
        script_name="data_split.py",
        inputs=[dataset.as_named_input("training_data")],
        outputs=[split_data],
        source_directory=PROJECT_FOLDER,
        arguments=[
            "--step-size",
            step_size,
            "--step-number",
            step_number,
            "--time-column-name",
            automl_settings.get("time_column_name"),
            "--time-series-id-column-names",
            automl_settings.get("grain_column_names"),
            "--output-dir",
            split_data,
        ],
        runconfig=run_config,
        compute_target=compute_target,
        allow_reuse=False,
    )
    ############################################################
    # We will do the backtest using the parallel run step.
    ############################################################
    settings_path = os.path.join(PROJECT_FOLDER, SETTINGS_FILE)
    hru.dump_object_to_json(automl_settings, settings_path)
    mini_batch_size = PipelineParameter(name="batch_size_param", default_value=str(1))
    back_test_config = ParallelRunConfig(
        source_directory=PROJECT_FOLDER,
        entry_script="retrain_models.py",
        mini_batch_size=mini_batch_size,
        error_threshold=-1,
        output_action="append_row",
        append_row_file_name="outputs.txt",
        compute_target=compute_target,
        environment=env,
        process_count_per_node=process_per_node,
        run_invocation_timeout=3600,
        node_count=node_count,
    )
    utilities.set_environment_variables_for_run(back_test_config)
    forecasts = PipelineData(name="forecasts", datastore=None)
    if model_name:
        parallel_step_name = "{}-backtest".format(model_name.replace("_", "-"))
    else:
        parallel_step_name = "AutoML-backtest"

    prs_args = [
        "--target_column_name",
        automl_settings.get("label_column_name"),
        "--output-dir",
        forecasts,
    ]
    if model_name is not None:
        prs_args.append("--model-name")
        prs_args.append(model_name)
    if model_uid is not None:
        prs_args.append("--model-uid")
        prs_args.append(model_uid)
    if forecast_quantiles:
        prs_args.append("--forecast_quantiles")
        prs_args.extend(forecast_quantiles)
    backtest_prs = ParallelRunStep(
        name=parallel_step_name,
        parallel_run_config=back_test_config,
        arguments=prs_args,
        inputs=[split_data],
        output=forecasts,
        allow_reuse=False,
    )
    ############################################################
    # Finally, we collect the output and return it as the scores output.
    ############################################################
    collection_step = PythonScriptStep(
        name="score",
        script_name="score.py",
        inputs=[forecasts.as_mount()],
        outputs=[data_results],
        source_directory=PROJECT_FOLDER,
        arguments=["--forecasts", forecasts, "--output-dir", data_results],
        runconfig=run_config,
        compute_target=compute_target,
        allow_reuse=False,
    )
    # Build and return the pipeline.
    return Pipeline(
        workspace=experiment.workspace,
        steps=[split_step, backtest_prs, collection_step],
    )
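For orientation, a minimal sketch of how this pipeline factory might be driven from a submission script. The names ws, tabular_ds, and settings are illustrative assumptions, not part of this file:

# Hypothetical driver for get_backtest_pipeline; ws, tabular_ds and settings
# are placeholders for a workspace, a registered TabularDataset and the
# AutoML settings dict expected by the function above.
from azureml.core import Experiment, Workspace

ws = Workspace.from_config()
experiment = Experiment(ws, "backtest-experiment")

pipeline = get_backtest_pipeline(
    experiment=experiment,
    dataset=tabular_ds,
    process_per_node=2,        # roughly cores-per-node divided by two
    node_count=4,
    compute_target=ws.compute_targets["cpu-cluster"],
    automl_settings=settings,  # must include time_column_name, label_column_name, ...
    step_size=7,               # periods to step back per backtest iteration
    step_number=4,             # number of backtest iterations
)
run = experiment.submit(pipeline)
run.wait_for_completion(show_output=True)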
@@ -1,20 +0,0 @@
DATE,grain,BeerProduction
2017-01-01,grain,9049
2017-02-01,grain,10458
2017-03-01,grain,12489
2017-04-01,grain,11499
2017-05-01,grain,13553
2017-06-01,grain,14740
2017-07-01,grain,11424
2017-08-01,grain,13412
2017-09-01,grain,11917
2017-10-01,grain,12721
2017-11-01,grain,13272
2017-12-01,grain,14278
2018-01-01,grain,9572
2018-02-01,grain,10423
2018-03-01,grain,12667
2018-04-01,grain,11904
2018-05-01,grain,14120
2018-06-01,grain,14565
2018-07-01,grain,12622
@@ -1,301 +0,0 @@
DATE,grain,BeerProduction
1992-01-01,grain,3459
1992-02-01,grain,3458
1992-03-01,grain,4002
1992-04-01,grain,4564
1992-05-01,grain,4221
1992-06-01,grain,4529
1992-07-01,grain,4466
1992-08-01,grain,4137
1992-09-01,grain,4126
1992-10-01,grain,4259
1992-11-01,grain,4240
1992-12-01,grain,4936
1993-01-01,grain,3031
1993-02-01,grain,3261
1993-03-01,grain,4160
1993-04-01,grain,4377
1993-05-01,grain,4307
1993-06-01,grain,4696
1993-07-01,grain,4458
1993-08-01,grain,4457
1993-09-01,grain,4364
1993-10-01,grain,4236
1993-11-01,grain,4500
1993-12-01,grain,4974
1994-01-01,grain,3075
1994-02-01,grain,3377
1994-03-01,grain,4443
1994-04-01,grain,4261
1994-05-01,grain,4460
1994-06-01,grain,4985
1994-07-01,grain,4324
1994-08-01,grain,4719
1994-09-01,grain,4374
1994-10-01,grain,4248
1994-11-01,grain,4784
1994-12-01,grain,4971
1995-01-01,grain,3370
1995-02-01,grain,3484
1995-03-01,grain,4269
1995-04-01,grain,3994
1995-05-01,grain,4715
1995-06-01,grain,4974
1995-07-01,grain,4223
1995-08-01,grain,5000
1995-09-01,grain,4235
1995-10-01,grain,4554
1995-11-01,grain,4851
1995-12-01,grain,4826
1996-01-01,grain,3699
1996-02-01,grain,3983
1996-03-01,grain,4262
1996-04-01,grain,4619
1996-05-01,grain,5219
1996-06-01,grain,4836
1996-07-01,grain,4941
1996-08-01,grain,5062
1996-09-01,grain,4365
1996-10-01,grain,5012
1996-11-01,grain,4850
1996-12-01,grain,5097
1997-01-01,grain,3758
1997-02-01,grain,3825
1997-03-01,grain,4454
1997-04-01,grain,4635
1997-05-01,grain,5210
1997-06-01,grain,5057
1997-07-01,grain,5231
1997-08-01,grain,5034
1997-09-01,grain,4970
1997-10-01,grain,5342
1997-11-01,grain,4831
1997-12-01,grain,5965
1998-01-01,grain,3796
1998-02-01,grain,4019
1998-03-01,grain,4898
1998-04-01,grain,5090
1998-05-01,grain,5237
1998-06-01,grain,5447
1998-07-01,grain,5435
1998-08-01,grain,5107
1998-09-01,grain,5515
1998-10-01,grain,5583
1998-11-01,grain,5346
1998-12-01,grain,6286
1999-01-01,grain,4032
1999-02-01,grain,4435
1999-03-01,grain,5479
1999-04-01,grain,5483
1999-05-01,grain,5587
1999-06-01,grain,6176
1999-07-01,grain,5621
1999-08-01,grain,5889
1999-09-01,grain,5828
1999-10-01,grain,5849
1999-11-01,grain,6180
1999-12-01,grain,6771
2000-01-01,grain,4243
2000-02-01,grain,4952
2000-03-01,grain,6008
2000-04-01,grain,5353
2000-05-01,grain,6435
2000-06-01,grain,6673
2000-07-01,grain,5636
2000-08-01,grain,6630
2000-09-01,grain,5887
2000-10-01,grain,6322
2000-11-01,grain,6520
2000-12-01,grain,6678
2001-01-01,grain,5082
2001-02-01,grain,5216
2001-03-01,grain,5893
2001-04-01,grain,5894
2001-05-01,grain,6799
2001-06-01,grain,6667
2001-07-01,grain,6374
2001-08-01,grain,6840
2001-09-01,grain,5575
2001-10-01,grain,6545
2001-11-01,grain,6789
2001-12-01,grain,7180
2002-01-01,grain,5117
2002-02-01,grain,5442
2002-03-01,grain,6337
2002-04-01,grain,6525
2002-05-01,grain,7216
2002-06-01,grain,6761
2002-07-01,grain,6958
2002-08-01,grain,7070
2002-09-01,grain,6148
2002-10-01,grain,6924
2002-11-01,grain,6716
2002-12-01,grain,7975
2003-01-01,grain,5326
2003-02-01,grain,5609
2003-03-01,grain,6414
2003-04-01,grain,6741
2003-05-01,grain,7144
2003-06-01,grain,7133
2003-07-01,grain,7568
2003-08-01,grain,7266
2003-09-01,grain,6634
2003-10-01,grain,7626
2003-11-01,grain,6843
2003-12-01,grain,8540
2004-01-01,grain,5629
2004-02-01,grain,5898
2004-03-01,grain,7045
2004-04-01,grain,7094
2004-05-01,grain,7333
2004-06-01,grain,7918
2004-07-01,grain,7289
2004-08-01,grain,7396
2004-09-01,grain,7259
2004-10-01,grain,7268
2004-11-01,grain,7731
2004-12-01,grain,9058
2005-01-01,grain,5557
2005-02-01,grain,6237
2005-03-01,grain,7723
2005-04-01,grain,7262
2005-05-01,grain,8241
2005-06-01,grain,8757
2005-07-01,grain,7352
2005-08-01,grain,8496
2005-09-01,grain,7741
2005-10-01,grain,7710
2005-11-01,grain,8247
2005-12-01,grain,8902
2006-01-01,grain,6066
2006-02-01,grain,6590
2006-03-01,grain,7923
2006-04-01,grain,7335
2006-05-01,grain,8843
2006-06-01,grain,9327
2006-07-01,grain,7792
2006-08-01,grain,9156
2006-09-01,grain,8037
2006-10-01,grain,8640
2006-11-01,grain,9128
2006-12-01,grain,9545
2007-01-01,grain,6627
2007-02-01,grain,6743
2007-03-01,grain,8195
2007-04-01,grain,7828
2007-05-01,grain,9570
2007-06-01,grain,9484
2007-07-01,grain,8608
2007-08-01,grain,9543
2007-09-01,grain,8123
2007-10-01,grain,9649
2007-11-01,grain,9390
2007-12-01,grain,10065
2008-01-01,grain,7093
2008-02-01,grain,7483
2008-03-01,grain,8365
2008-04-01,grain,8895
2008-05-01,grain,9794
2008-06-01,grain,9977
2008-07-01,grain,9553
2008-08-01,grain,9375
2008-09-01,grain,9225
2008-10-01,grain,9948
2008-11-01,grain,8758
2008-12-01,grain,10839
2009-01-01,grain,7266
2009-02-01,grain,7578
2009-03-01,grain,8688
2009-04-01,grain,9162
2009-05-01,grain,9369
2009-06-01,grain,10167
2009-07-01,grain,9507
2009-08-01,grain,8923
2009-09-01,grain,9272
2009-10-01,grain,9075
2009-11-01,grain,8949
2009-12-01,grain,10843
2010-01-01,grain,6558
2010-02-01,grain,7481
2010-03-01,grain,9475
2010-04-01,grain,9424
2010-05-01,grain,9351
2010-06-01,grain,10552
2010-07-01,grain,9077
2010-08-01,grain,9273
2010-09-01,grain,9420
2010-10-01,grain,9413
2010-11-01,grain,9866
2010-12-01,grain,11455
2011-01-01,grain,6901
2011-02-01,grain,8014
2011-03-01,grain,9832
2011-04-01,grain,9281
2011-05-01,grain,9967
2011-06-01,grain,11344
2011-07-01,grain,9106
2011-08-01,grain,10469
2011-09-01,grain,10085
2011-10-01,grain,9612
2011-11-01,grain,10328
2011-12-01,grain,11483
2012-01-01,grain,7486
2012-02-01,grain,8641
2012-03-01,grain,9709
2012-04-01,grain,9423
2012-05-01,grain,11342
2012-06-01,grain,11274
2012-07-01,grain,9845
2012-08-01,grain,11163
2012-09-01,grain,9532
2012-10-01,grain,10754
2012-11-01,grain,10953
2012-12-01,grain,11922
2013-01-01,grain,8395
2013-02-01,grain,8888
2013-03-01,grain,10110
2013-04-01,grain,10493
2013-05-01,grain,12218
2013-06-01,grain,11385
2013-07-01,grain,11186
2013-08-01,grain,11462
2013-09-01,grain,10494
2013-10-01,grain,11540
2013-11-01,grain,11138
2013-12-01,grain,12709
2014-01-01,grain,8557
2014-02-01,grain,9059
2014-03-01,grain,10055
2014-04-01,grain,10977
2014-05-01,grain,11792
2014-06-01,grain,11904
2014-07-01,grain,10965
2014-08-01,grain,10981
2014-09-01,grain,10828
2014-10-01,grain,11817
2014-11-01,grain,10470
2014-12-01,grain,13310
2015-01-01,grain,8400
2015-02-01,grain,9062
2015-03-01,grain,10722
2015-04-01,grain,11107
2015-05-01,grain,11508
2015-06-01,grain,12904
2015-07-01,grain,11869
2015-08-01,grain,11224
2015-09-01,grain,12022
2015-10-01,grain,11983
2015-11-01,grain,11506
2015-12-01,grain,14183
2016-01-01,grain,8650
2016-02-01,grain,10323
2016-03-01,grain,12110
2016-04-01,grain,11424
2016-05-01,grain,12243
2016-06-01,grain,13686
2016-07-01,grain,10956
2016-08-01,grain,12706
2016-09-01,grain,12279
2016-10-01,grain,11914
2016-11-01,grain,13025
2016-12-01,grain,14431
@@ -1,11 +0,0 @@
name: auto-ml-forecasting-beer-remote
dependencies:
- py-xgboost<=0.90
- pip:
  - azureml-sdk
  - numpy==1.16.2
  - pandas==0.23.4
  - azureml-train-automl
  - azureml-widgets
  - matplotlib
  - azureml-train
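As a hedged sketch, a conda specification like the one above could be loaded into an AzureML v1 Environment for remote runs; the environment name and file path below are assumptions, not something this file prescribes:

# Hypothetical consumption of the conda spec above; name and file_path are
# placeholders.
from azureml.core import Environment

env = Environment.from_conda_specification(
    name="beer-forecast-env",
    file_path="auto-ml-forecasting-beer-remote.yml",
)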
@@ -1,137 +0,0 @@
import pandas as pd
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.train.estimator import Estimator
from azureml.core.run import Run


def split_fraction_by_grain(df, fraction, time_column_name,
                            grain_column_names=None):
    """Group df by grain and split on the last n rows for each group."""
    if not grain_column_names:
        df['tmp_grain_column'] = 'grain'
        grain_column_names = ['tmp_grain_column']

    df_grouped = (df.sort_values(time_column_name)
                  .groupby(grain_column_names, group_keys=False))

    df_head = df_grouped.apply(
        lambda dfg: dfg.iloc[:-int(len(dfg) * fraction)] if fraction > 0 else dfg)

    df_tail = df_grouped.apply(
        lambda dfg: dfg.iloc[-int(len(dfg) * fraction):] if fraction > 0 else dfg[:0])

    if 'tmp_grain_column' in grain_column_names:
        for df2 in (df, df_head, df_tail):
            df2.drop('tmp_grain_column', axis=1, inplace=True)
        grain_column_names.remove('tmp_grain_column')

    return df_head, df_tail


def split_full_for_forecasting(df, time_column_name,
                               grain_column_names=None, test_split=0.2):
    index_name = df.index.name

    # Assumes that there isn't already a column called tmpindex.
    df['tmpindex'] = df.index

    train_df, test_df = split_fraction_by_grain(
        df, test_split, time_column_name, grain_column_names)

    train_df = train_df.set_index('tmpindex')
    train_df.index.name = index_name

    test_df = test_df.set_index('tmpindex')
    test_df.index.name = index_name

    df.drop('tmpindex', axis=1, inplace=True)

    return train_df, test_df


def get_result_df(remote_run):
    children = list(remote_run.get_children(recursive=True))
    summary_df = pd.DataFrame(index=['run_id', 'run_algorithm',
                                     'primary_metric', 'Score'])
    goal_minimize = False
    for run in children:
        if 'run_algorithm' in run.properties and 'score' in run.properties:
            summary_df[run.id] = [run.id, run.properties['run_algorithm'],
                                  run.properties['primary_metric'],
                                  float(run.properties['score'])]
            if 'goal' in run.properties:
                goal_minimize = run.properties['goal'].split('_')[-1] == 'min'

    summary_df = summary_df.T.sort_values(
        'Score',
        ascending=goal_minimize).drop_duplicates(['run_algorithm'])
    summary_df = summary_df.set_index('run_algorithm')
    return summary_df


def run_inference(test_experiment, compute_target, script_folder, train_run,
                  test_dataset, lookback_dataset, max_horizon,
                  target_column_name, time_column_name, freq):
    model_base_name = 'model.pkl'
    if 'model_data_location' in train_run.properties:
        model_location = train_run.properties['model_data_location']
        _, model_base_name = model_location.rsplit('/', 1)
    train_run.download_file('outputs/{}'.format(model_base_name),
                            'inference/{}'.format(model_base_name))
    train_run.download_file('outputs/conda_env_v_1_0_0.yml',
                            'inference/condafile.yml')

    inference_env = Environment("myenv")
    inference_env.docker.enabled = True
    inference_env.python.conda_dependencies = CondaDependencies(
        conda_dependencies_file_path='inference/condafile.yml')

    est = Estimator(source_directory=script_folder,
                    entry_script='infer.py',
                    script_params={
                        '--max_horizon': max_horizon,
                        '--target_column_name': target_column_name,
                        '--time_column_name': time_column_name,
                        '--frequency': freq,
                        '--model_path': model_base_name
                    },
                    inputs=[test_dataset.as_named_input('test_data'),
                            lookback_dataset.as_named_input('lookback_data')],
                    compute_target=compute_target,
                    environment_definition=inference_env)

    run = test_experiment.submit(
        est, tags={
            'training_run_id': train_run.id,
            'run_algorithm': train_run.properties['run_algorithm'],
            'valid_score': train_run.properties['score'],
            'primary_metric': train_run.properties['primary_metric']
        })

    run.log("run_algorithm", run.tags['run_algorithm'])
    return run


def run_multiple_inferences(summary_df, train_experiment, test_experiment,
                            compute_target, script_folder, test_dataset,
                            lookback_dataset, max_horizon, target_column_name,
                            time_column_name, freq):
    for run_name, run_summary in summary_df.iterrows():
        print(run_name)
        print(run_summary)
        run_id = run_summary.run_id
        train_run = Run(train_experiment, run_id)

        test_run = run_inference(
            test_experiment, compute_target, script_folder, train_run,
            test_dataset, lookback_dataset, max_horizon, target_column_name,
            time_column_name, freq)

        print(test_run)
        summary_df.loc[summary_df.run_id == run_id,
                       'test_run_id'] = test_run.id

    return summary_df
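As a quick, self-contained illustration of split_full_for_forecasting above; the toy frame and column names are made up for demonstration only:

# Toy usage of split_full_for_forecasting; with the default test_split=0.2,
# a 10-row single-grain frame splits into 8 train rows and 2 test rows.
import pandas as pd

df = pd.DataFrame({
    "date": pd.date_range("2020-01-01", periods=10, freq="D"),
    "cnt": range(10),
})
train_df, test_df = split_full_for_forecasting(df, time_column_name="date")
print(len(train_df), len(test_df))  # expected: 8 2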
@@ -1,325 +0,0 @@
import pandas as pd
import numpy as np
import argparse
from azureml.core import Run
from sklearn.externals import joblib
from sklearn.metrics import mean_absolute_error, mean_squared_error
from azureml.automl.core._vendor.automl.client.core.common import metrics
from automl.client.core.common import constants
from pandas.tseries.frequencies import to_offset


def align_outputs(y_predicted, X_trans, X_test, y_test,
                  predicted_column_name='predicted',
                  horizon_colname='horizon_origin'):
    """
    Demonstrates how to get the output aligned to the inputs
    using pandas indexes. Helps understand what happened if
    the output's shape differs from the input shape, or if
    the data got re-sorted by time and grain during forecasting.

    Typical causes of misalignment are:
    * we predicted some periods that were missing in actuals -> drop from eval
    * model was asked to predict past max_horizon -> increase max horizon
    * data at start of X_test was needed for lags -> provide previous periods
    """
    if horizon_colname in X_trans:
        df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
                                horizon_colname: X_trans[horizon_colname]})
    else:
        df_fcst = pd.DataFrame({predicted_column_name: y_predicted})

    # y and X outputs are aligned by the forecast() function contract
    df_fcst.index = X_trans.index

    # align original X_test to y_test
    X_test_full = X_test.copy()
    X_test_full[target_column_name] = y_test

    # X_test_full's index does not include origin, so reset for merge
    df_fcst.reset_index(inplace=True)
    X_test_full = X_test_full.reset_index().drop(columns='index')
    together = df_fcst.merge(X_test_full, how='right')

    # Drop rows where prediction or actuals are nan; this happens because of
    # missing actuals or at the edges of time due to lags/rolling windows.
    clean = together[together[[target_column_name,
                               predicted_column_name]].notnull().all(axis=1)]
    return clean


def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
                                      max_horizon, X_lookback, y_lookback,
                                      freq='D'):
    """
    Produce forecasts on a rolling origin over the given test set.

    Each iteration makes a forecast for the next 'max_horizon' periods
    with respect to the current origin, then advances the origin by the
    horizon time duration. The prediction context for each forecast is set so
    that the forecaster uses the actual target values prior to the current
    origin time for constructing lag features.

    This function returns a concatenated DataFrame of rolling forecasts.
    """
    print("Using lookback of size: ", y_lookback.size)
    df_list = []
    origin_time = X_test[time_column_name].min()
    X = X_lookback.append(X_test)
    y = np.concatenate((y_lookback, y_test), axis=0)
    while origin_time <= X_test[time_column_name].max():
        # Set the horizon time - end date of the forecast
        horizon_time = origin_time + max_horizon * to_offset(freq)

        # Extract test data from an expanding window up to the horizon
        expand_wind = (X[time_column_name] < horizon_time)
        X_test_expand = X[expand_wind]
        y_query_expand = np.zeros(len(X_test_expand)).astype(float)
        y_query_expand.fill(np.nan)

        if origin_time != X[time_column_name].min():
            # Set the context by including actuals up to the origin time
            test_context_expand_wind = (X[time_column_name] < origin_time)
            context_expand_wind = (
                X_test_expand[time_column_name] < origin_time)
            y_query_expand[context_expand_wind] = y[test_context_expand_wind]

        # Print some debug info
        print("Horizon_time:", horizon_time,
              " origin_time: ", origin_time,
              " max_horizon: ", max_horizon,
              " freq: ", freq)
        print("expand_wind: ", expand_wind)
        print("y_query_expand")
        print(y_query_expand)
        print("X_test")
        print(X)
        print("X_test_expand")
        print(X_test_expand)
        print("Type of X_test_expand: ", type(X_test_expand))
        print("Type of y_query_expand: ", type(y_query_expand))

        # Make a forecast out to the maximum horizon
        y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)

        print("y_fcst")
        print(y_fcst)

        # Align forecast with test set for dates within
        # the current rolling window
        trans_tindex = X_trans.index.get_level_values(time_column_name)
        trans_roll_wind = (trans_tindex >= origin_time) & (
            trans_tindex < horizon_time)
        test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
        df_list.append(align_outputs(
            y_fcst[trans_roll_wind], X_trans[trans_roll_wind],
            X[test_roll_wind], y[test_roll_wind]))

        # Advance the origin time
        origin_time = horizon_time

    return pd.concat(df_list, ignore_index=True)


def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
    """
    Produce forecasts on a rolling origin over the given test set.

    Each iteration makes a forecast for the next 'max_horizon' periods
    with respect to the current origin, then advances the origin by the
    horizon time duration. The prediction context for each forecast is set so
    that the forecaster uses the actual target values prior to the current
    origin time for constructing lag features.

    This function returns a concatenated DataFrame of rolling forecasts.
    """
    df_list = []
    origin_time = X_test[time_column_name].min()
    while origin_time <= X_test[time_column_name].max():
        # Set the horizon time - end date of the forecast
        horizon_time = origin_time + max_horizon * to_offset(freq)

        # Extract test data from an expanding window up to the horizon
        expand_wind = (X_test[time_column_name] < horizon_time)
        X_test_expand = X_test[expand_wind]
        y_query_expand = np.zeros(len(X_test_expand)).astype(float)
        y_query_expand.fill(np.nan)

        if origin_time != X_test[time_column_name].min():
            # Set the context by including actuals up to the origin time
            test_context_expand_wind = (X_test[time_column_name] < origin_time)
            context_expand_wind = (
                X_test_expand[time_column_name] < origin_time)
            y_query_expand[context_expand_wind] = y_test[
                test_context_expand_wind]

        # Print some debug info
        print("Horizon_time:", horizon_time,
              " origin_time: ", origin_time,
              " max_horizon: ", max_horizon,
              " freq: ", freq)
        print("expand_wind: ", expand_wind)
        print("y_query_expand")
        print(y_query_expand)
        print("X_test")
        print(X_test)
        print("X_test_expand")
        print(X_test_expand)
        print("Type of X_test_expand: ", type(X_test_expand))
        print("Type of y_query_expand: ", type(y_query_expand))

        # Make a forecast out to the maximum horizon
        y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)

        print("y_fcst")
        print(y_fcst)

        # Align forecast with test set for dates within the
        # current rolling window
        trans_tindex = X_trans.index.get_level_values(time_column_name)
        trans_roll_wind = (trans_tindex >= origin_time) & (
            trans_tindex < horizon_time)
        test_roll_wind = expand_wind & (
            X_test[time_column_name] >= origin_time)
        df_list.append(align_outputs(y_fcst[trans_roll_wind],
                                     X_trans[trans_roll_wind],
                                     X_test[test_roll_wind],
                                     y_test[test_roll_wind]))

        # Advance the origin time
        origin_time = horizon_time

    return pd.concat(df_list, ignore_index=True)


def APE(actual, pred):
    """
    Calculate absolute percentage error.
    Returns a vector of APE values with the same length as actual/pred.
    """
    return 100 * np.abs((actual - pred) / actual)


def MAPE(actual, pred):
    """
    Calculate mean absolute percentage error.
    Removes NA and values where actual is close to zero.
    """
    not_na = ~(np.isnan(actual) | np.isnan(pred))
    not_zero = ~np.isclose(actual, 0.0)
    actual_safe = actual[not_na & not_zero]
    pred_safe = pred[not_na & not_zero]
    return np.mean(APE(actual_safe, pred_safe))


parser = argparse.ArgumentParser()
parser.add_argument(
    '--max_horizon', type=int, dest='max_horizon',
    default=10, help='Max Horizon for forecasting')
parser.add_argument(
    '--target_column_name', type=str, dest='target_column_name',
    help='Target Column Name')
parser.add_argument(
    '--time_column_name', type=str, dest='time_column_name',
    help='Time Column Name')
parser.add_argument(
    '--frequency', type=str, dest='freq',
    help='Frequency of prediction')
parser.add_argument(
    '--model_path', type=str, dest='model_path',
    default='model.pkl', help='Filename of model to be loaded')


args = parser.parse_args()
max_horizon = args.max_horizon
target_column_name = args.target_column_name
time_column_name = args.time_column_name
freq = args.freq
model_path = args.model_path


print('args passed are: ')
print(max_horizon)
print(target_column_name)
print(time_column_name)
print(freq)
print(model_path)

run = Run.get_context()
# get input datasets by name
test_dataset = run.input_datasets['test_data']
lookback_dataset = run.input_datasets['lookback_data']

grain_column_names = []

df = test_dataset.to_pandas_dataframe()

print('Read df')
print(df)

X_test_df = test_dataset.drop_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns(
    None).keep_columns(columns=[target_column_name])

X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(
    None).keep_columns(columns=[target_column_name])

fitted_model = joblib.load(model_path)


if hasattr(fitted_model, 'get_lookback'):
    lookback = fitted_model.get_lookback()
    df_all = do_rolling_forecast_with_lookback(
        fitted_model,
        X_test_df.to_pandas_dataframe(),
        y_test_df.to_pandas_dataframe().values.T[0],
        max_horizon,
        X_lookback_df.to_pandas_dataframe()[-lookback:],
        y_lookback_df.to_pandas_dataframe().values.T[0][-lookback:],
        freq)
else:
    df_all = do_rolling_forecast(
        fitted_model,
        X_test_df.to_pandas_dataframe(),
        y_test_df.to_pandas_dataframe().values.T[0],
        max_horizon,
        freq)

print(df_all)

print("target values:::")
print(df_all[target_column_name])
print("predicted values:::")
print(df_all['predicted'])

# use the automl metrics module
scores = metrics.compute_metrics_regression(
    df_all['predicted'],
    df_all[target_column_name],
    list(constants.Metric.SCALAR_REGRESSION_SET),
    None, None, None)

print("scores:")
print(scores)

for key, value in scores.items():
    run.log(key, value)

print("Simple forecasting model")
rmse = np.sqrt(mean_squared_error(
    df_all[target_column_name], df_all['predicted']))
print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
mae = mean_absolute_error(df_all[target_column_name], df_all['predicted'])
print('mean_absolute_error score: %.2f' % mae)
print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted']))

run.log('rmse', rmse)
run.log('mae', mae)
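A small sanity check of the MAPE helper defined above; the arrays are made up, and the point is that NaNs and near-zero actuals are masked out before averaging:

# Toy check of the MAPE masking behavior; only the first two points survive
# the NaN and near-zero filters, giving mean(|100-110|/100, |200-180|/200)*100.
import numpy as np

actual = np.array([100.0, 200.0, 0.0, np.nan])
pred = np.array([110.0, 180.0, 5.0, 50.0])
print(MAPE(actual, pred))  # expected: 10.0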
@@ -16,6 +16,13 @@
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<font color=\"red\" size=\"5\"><strong>!Important!</strong> </br>This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-task-bike-share)).</font>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -42,7 +49,7 @@
|
||||
"\n",
|
||||
"AutoML highlights here include built-in holiday featurization, accessing engineered feature names, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
|
||||
"\n",
|
||||
"Make sure you have executed the [configuration notebook](../../../configuration.ipynb) before running this notebook.\n",
|
||||
"Make sure you have executed the [configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) before running this notebook.\n",
|
||||
"\n",
|
||||
"Notebook synopsis:\n",
|
||||
"1. Creating an Experiment in an existing Workspace\n",
|
||||
@@ -61,24 +68,30 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1680248038565
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import azureml.core\n",
|
||||
"import pandas as pd\n",
|
||||
"import numpy as np\n",
|
||||
"import json\n",
|
||||
"import logging\n",
|
||||
"from datetime import datetime\n",
|
||||
"\n",
|
||||
"from azureml.core import Workspace, Experiment, Dataset\n",
|
||||
"from azureml.train.automl import AutoMLConfig\n",
|
||||
"from datetime import datetime"
|
||||
"import azureml.core\n",
|
||||
"import numpy as np\n",
|
||||
"import pandas as pd\n",
|
||||
"from azureml.automl.core.featurization import FeaturizationConfig\n",
|
||||
"from azureml.core import Dataset, Experiment, Workspace\n",
|
||||
"from azureml.train.automl import AutoMLConfig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -87,7 +100,6 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.4.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
@@ -107,19 +119,20 @@
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# choose a name for the run history container in the workspace\n",
|
||||
"experiment_name = 'automl-bikeshareforecasting'\n",
|
||||
"experiment_name = \"automl-bikeshareforecasting\"\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['SKU'] = ws.sku\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Run History Name'] = experiment_name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"SKU\"] = ws.sku\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"Run History Name\"] = experiment_name\n",
|
||||
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
@@ -129,9 +142,12 @@
|
||||
"source": [
|
||||
"## Compute\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this article on the default limits and how to request more quota."
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -149,10 +165,11 @@
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
" print(\"Found existing cluster, use it.\")\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
|
||||
" max_nodes=4)\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
|
||||
" )\n",
|
||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
@@ -164,23 +181,6 @@
|
||||
"source": [
|
||||
"## Data\n",
|
||||
"\n",
|
||||
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace) is paired with the storage account, which contains the default data store. We will use it to upload the bike share data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"datastore = ws.get_default_datastore()\n",
|
||||
"datastore.upload_files(files = ['./bike-no.csv'], target_path = 'dataset/', overwrite = True,show_progress = True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's set up what we know about the dataset. \n",
|
||||
"\n",
|
||||
"**Target column** is what we want to forecast.\n",
|
||||
@@ -194,23 +194,54 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"target_column_name = 'cnt'\n",
|
||||
"time_column_name = 'date'"
|
||||
"target_column_name = \"cnt\"\n",
|
||||
"time_column_name = \"date\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"You are now ready to load the historical bike share data. We will load the CSV file into a plain pandas DataFrame."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"jupyter": {
|
||||
"outputs_hidden": false,
|
||||
"source_hidden": false
|
||||
},
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'dataset/bike-no.csv')]).with_timestamp_columns(fine_grain_timestamp=time_column_name) \n",
|
||||
"dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
|
||||
"all_data = pd.read_csv(\"bike-no.csv\", parse_dates=[time_column_name])\n",
|
||||
"\n",
|
||||
"# Drop the columns 'casual' and 'registered' as these columns are a breakdown of the total and therefore a leak.\n",
|
||||
"all_data.drop([\"casual\", \"registered\"], axis=1, inplace=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"### Split the data\n",
|
||||
"\n",
|
||||
@@ -220,22 +251,80 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1680247376789
|
||||
},
|
||||
"jupyter": {
|
||||
"outputs_hidden": false,
|
||||
"source_hidden": false
|
||||
},
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# select data that occurs before a specified date\n",
|
||||
"train = dataset.time_before(datetime(2012, 8, 31), include_boundary=True)\n",
|
||||
"train.to_pandas_dataframe().tail(5).reset_index(drop=True)"
|
||||
"train = all_data[all_data[time_column_name] <= pd.Timestamp(\"2012-08-31\")].copy()\n",
|
||||
"test = all_data[all_data[time_column_name] >= pd.Timestamp(\"2012-09-01\")].copy()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Upload data to datastore\n",
|
||||
"\n",
|
||||
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace) is paired with the storage account, which contains the default data store. We will use it to upload the bike share data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"jupyter": {
|
||||
"outputs_hidden": false,
|
||||
"source_hidden": false
|
||||
},
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"test = dataset.time_after(datetime(2012, 9, 1), include_boundary=True)\n",
|
||||
"test.to_pandas_dataframe().head(5).reset_index(drop=True)"
|
||||
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||
"\n",
|
||||
"datastore = ws.get_default_datastore()\n",
|
||||
"\n",
|
||||
"train_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||
" train, target=(datastore, \"dataset/\"), name=\"bike_no_train\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"test_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||
" test, target=(datastore, \"dataset/\"), name=\"bike_no_test\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Forecasting Parameters\n",
|
||||
"To define forecasting parameters for your experiment training, you can leverage the ForecastingParameters class. The table below details the forecasting parameter we will be passing into our experiment.\n",
|
||||
"\n",
|
||||
"|Property|Description|\n",
|
||||
"|-|-|\n",
|
||||
"|**time_column_name**|The name of your time column.|\n",
|
||||
"|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
|
||||
"|**country_or_region_for_holidays**|The country/region used to generate holiday features. These should be ISO 3166 two-letter country/region codes (i.e. 'US', 'GB').|\n",
|
||||
"|**target_lags**|The target_lags specifies how far back we will construct the lags of the target variable.|\n",
|
||||
"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.\n",
|
||||
"|**cv_step_size**|Number of periods between two consecutive cross-validation folds. The default value is \"auto\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -250,20 +339,16 @@
|
||||
"|-|-|\n",
|
||||
"|**task**|forecasting|\n",
|
||||
"|**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>\n",
|
||||
"|**blacklist_models**|Models in blacklist won't be used by AutoML. All supported models can be found at [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.constants.supportedmodels.forecasting?view=azure-ml-py).|\n",
|
||||
"|**blocked_models**|Models in blocked_models won't be used by AutoML. All supported models can be found at [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.constants.supportedmodels.forecasting?view=azure-ml-py).|\n",
|
||||
"|**experiment_timeout_hours**|Experimentation timeout in hours.|\n",
|
||||
"|**training_data**|Input dataset, containing both features and label column.|\n",
|
||||
"|**label_column_name**|The name of the label column.|\n",
|
||||
"|**compute_target**|The remote compute for training.|\n",
|
||||
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
||||
"|**n_cross_validations**|Number of cross-validation folds to use for model/pipeline selection. The default value is \"auto\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value.\n",
|
||||
"|**enable_early_stopping**|If early stopping is on, training will stop when the primary metric is no longer improving.|\n",
|
||||
"|**time_column_name**|Name of the datetime column in the input data|\n",
|
||||
"|**max_horizon**|Maximum desired forecast horizon in units of time-series frequency|\n",
|
||||
"|**country_or_region**|The country/region used to generate holiday features. These should be ISO 3166 two-letter country/region codes (i.e. 'US', 'GB').|\n",
|
||||
"|**target_lags**|The target_lags specifies how far back we will construct the lags of the target variable.|\n",
|
||||
"|**drop_column_names**|Name(s) of columns to drop prior to modeling|\n",
|
||||
"|**forecasting_parameters**|A class that holds all the forecasting related parameters.|\n",
|
||||
"\n",
|
||||
"This notebook uses the blacklist_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blacklist_models list but you may need to increase the experiment_timeout_hours parameter value to get results."
|
||||
"This notebook uses the blocked_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blocked_models list but you may need to increase the experiment_timeout_hours parameter value to get results."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -281,7 +366,26 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"max_horizon = 14"
|
||||
"forecast_horizon = 14"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Convert prediction type to integer\n",
|
||||
"The featurization configuration can be used to change the default prediction type from decimal numbers to integer. This customization can be used in the scenario when the target column is expected to contain whole values as the number of rented bikes per day."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"featurization_config = FeaturizationConfig()\n",
|
||||
"# Force the target column, to be integer type.\n",
|
||||
"featurization_config.add_prediction_transform_type(\"Integer\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -297,27 +401,33 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"time_series_settings = {\n",
|
||||
" 'time_column_name': time_column_name,\n",
|
||||
" 'max_horizon': max_horizon, \n",
|
||||
" 'country_or_region': 'US', # set country_or_region will trigger holiday featurizer\n",
|
||||
" 'target_lags': 'auto', # use heuristic based lag setting \n",
|
||||
" 'drop_column_names': ['casual', 'registered'] # these columns are a breakdown of the total and therefore a leak\n",
|
||||
"}\n",
|
||||
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task='forecasting', \n",
|
||||
" primary_metric='normalized_root_mean_squared_error',\n",
|
||||
" blacklist_models = ['ExtremeRandomTrees'], \n",
|
||||
" experiment_timeout_hours=0.3,\n",
|
||||
" training_data=train,\n",
|
||||
" label_column_name=target_column_name,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" enable_early_stopping=True,\n",
|
||||
" n_cross_validations=3, \n",
|
||||
" max_concurrent_iterations=4,\n",
|
||||
" max_cores_per_iteration=-1,\n",
|
||||
" verbosity=logging.INFO,\n",
|
||||
" **time_series_settings)"
|
||||
"forecasting_parameters = ForecastingParameters(\n",
|
||||
" time_column_name=time_column_name,\n",
|
||||
" forecast_horizon=forecast_horizon,\n",
|
||||
" country_or_region_for_holidays=\"US\", # set country_or_region will trigger holiday featurizer\n",
|
||||
" target_lags=\"auto\", # use heuristic based lag setting\n",
|
||||
" freq=\"D\", # Set the forecast frequency to be daily\n",
|
||||
" cv_step_size=\"auto\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(\n",
|
||||
" task=\"forecasting\",\n",
|
||||
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||
" featurization=featurization_config,\n",
|
||||
" blocked_models=[\"ExtremeRandomTrees\"],\n",
|
||||
" experiment_timeout_hours=0.3,\n",
|
||||
" training_data=train_dataset,\n",
|
||||
" label_column_name=target_column_name,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" enable_early_stopping=True,\n",
|
||||
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||
" max_concurrent_iterations=4,\n",
|
||||
" max_cores_per_iteration=-1,\n",
|
||||
" verbosity=logging.INFO,\n",
|
||||
" forecasting_parameters=forecasting_parameters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -333,8 +443,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run = experiment.submit(automl_config, show_output=False)\n",
|
||||
"remote_run"
|
||||
"remote_run = experiment.submit(automl_config, show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -350,8 +459,8 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retrieve the Best Model\n",
|
||||
"Below we select the best model from all the training iterations using get_output method."
|
||||
"### Retrieve the Best Run details\n",
|
||||
"Below we retrieve the best Run object from among all the runs in the experiment."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -360,8 +469,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"best_run, fitted_model = remote_run.get_output()\n",
|
||||
"fitted_model.steps"
|
||||
"best_run = remote_run.get_best_child()\n",
|
||||
"best_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -370,7 +479,7 @@
|
||||
"source": [
|
||||
"## Featurization\n",
|
||||
"\n",
|
||||
"You can access the engineered feature names generated in time-series featurization. Note that a number of named holiday periods are represented. We recommend that you have at least one year of data when using this feature to ensure that all yearly holidays are captured in the training featurization."
|
||||
"We can look at the engineered feature names generated in time-series featurization via. the JSON file named 'engineered_feature_names.json' under the run outputs. Note that a number of named holiday periods are represented. We recommend that you have at least one year of data when using this feature to ensure that all yearly holidays are captured in the training featurization."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -379,7 +488,14 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()"
|
||||
"# Download the JSON file locally\n",
|
||||
"best_run.download_file(\n",
|
||||
" \"outputs/engineered_feature_names.json\", \"engineered_feature_names.json\"\n",
|
||||
")\n",
|
||||
"with open(\"engineered_feature_names.json\", \"r\") as f:\n",
|
||||
" records = json.load(f)\n",
|
||||
"\n",
|
||||
"records"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -403,10 +519,26 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get the featurization summary as a list of JSON\n",
|
||||
"featurization_summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()\n",
|
||||
"# View the featurization summary as a pandas dataframe\n",
|
||||
"pd.DataFrame.from_records(featurization_summary)"
|
||||
"# Download the featurization summary JSON file locally\n",
|
||||
"best_run.download_file(\n",
|
||||
" \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Render the JSON as a pandas DataFrame\n",
|
||||
"with open(\"featurization_summary.json\", \"r\") as f:\n",
|
||||
" records = json.load(f)\n",
|
||||
"fs = pd.DataFrame.from_records(records)\n",
|
||||
"\n",
|
||||
"# View a summary of the featurization\n",
|
||||
"fs[\n",
|
||||
" [\n",
|
||||
" \"RawFeatureName\",\n",
|
||||
" \"TypeDetected\",\n",
|
||||
" \"Dropped\",\n",
|
||||
" \"EngineeredFeatureCount\",\n",
|
||||
" \"Transformations\",\n",
|
||||
" ]\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -422,7 +554,7 @@
|
||||
"source": [
|
||||
"We now use the best fitted model from the AutoML Run to make forecasts for the test set. We will do batch scoring on the test dataset which should have the same schema as training dataset.\n",
|
||||
"\n",
|
||||
"The scoring will run on a remote compute. In this example, it will reuse the training compute.|"
|
||||
"The scoring will run on a remote compute. In this example, it will reuse the training compute."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -439,7 +571,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retrieving forecasts from the model\n",
|
||||
"To run the forecast on the remote compute we will use two helper scripts: forecasting_script and forecasting_helper. These scripts contain the utility methods which will be used by the remote estimator. We copy these scripts to the project folder to upload them to remote compute."
|
||||
"To run the forecast on the remote compute we will use a helper script: forecasting_script. This script contains the utility methods which will be used by the remote estimator. We copy the script to the project folder to upload it to remote compute."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -451,17 +583,16 @@
|
||||
"import os\n",
|
||||
"import shutil\n",
|
||||
"\n",
|
||||
"script_folder = os.path.join(os.getcwd(), 'forecast')\n",
|
||||
"script_folder = os.path.join(os.getcwd(), \"forecast\")\n",
|
||||
"os.makedirs(script_folder, exist_ok=True)\n",
|
||||
"shutil.copy('forecasting_script.py', script_folder)\n",
|
||||
"shutil.copy('forecasting_helper.py', script_folder)"
|
||||
"shutil.copy(\"forecasting_script.py\", script_folder)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For brevity we have created the function called run_forecast. It submits the test data to the best model and run the estimation on the selected compute target."
|
||||
"For brevity, we have created a function called run_forecast that submits the test data to the best model determined during the training run and retrieves forecasts. The test set is longer than the forecast horizon specified at train time, so the forecasting script uses a so-called rolling evaluation to generate predictions over the whole test set. A rolling evaluation iterates the forecaster over the test set, using the actuals in the test set to make lag features as needed. "
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -472,8 +603,9 @@
|
||||
"source": [
|
||||
"from run_forecast import run_rolling_forecast\n",
|
||||
"\n",
|
||||
"remote_run = run_rolling_forecast(test_experiment, compute_target, best_run, test, max_horizon,\n",
|
||||
" target_column_name, time_column_name)\n",
|
||||
"remote_run = run_rolling_forecast(\n",
|
||||
" test_experiment, compute_target, best_run, test_dataset, target_column_name\n",
|
||||
")\n",
|
||||
"remote_run"
|
||||
]
|
||||
},
|
||||
@@ -490,7 +622,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Download the prediction result for metrics calcuation\n",
|
||||
"### Download the prediction result for metrics calculation\n",
|
||||
"The test data with predictions are saved in artifact outputs/predictions.csv. You can download it and calculation some error metrics for the forecasts and vizualize the predictions vs. the actuals."
|
||||
]
|
||||
},
|
||||
@@ -500,8 +632,15 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run.download_file('outputs/predictions.csv', 'predictions.csv')\n",
|
||||
"df_all = pd.read_csv('predictions.csv')"
|
||||
"remote_run.download_file(\"outputs/predictions.csv\", \"predictions.csv\")\n",
|
||||
"fcst_df = pd.read_csv(\"predictions.csv\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that the rolling forecast can contain multiple predictions for each date, each from a different forecast origin. For example, consider 2012-09-05:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -510,35 +649,55 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.automl.core._vendor.automl.client.core.common import metrics\n",
|
||||
"fcst_df[fcst_df.date == \"2012-09-05\"]"
|
||||
]
|
||||
},
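To see this structure across all dates at once, a pivot view can help; this is just an illustrative convenience using the column names from the cell above.

```python
# Rows: dates; columns: forecast origins; values: the prediction made
# for that date from that origin (NaN where the origin did not cover the date)
fcst_df.pivot_table(index="date", columns="forecast_origin", values="predicted").head(10)
```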
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here, the forecast origin refers to the latest date of actuals available for a given forecast. The earliest origin in the rolling forecast, 2012-08-31, is the last day in the training data. For origin date 2012-09-01, the forecasts use actual recorded counts from the training data *and* the actual count recorded on 2012-09-01. Note that the model is not retrained for origin dates later than 2012-08-31, but the values for model features, such as lagged values of daily count, are updated.\n",
|
||||
"\n",
|
||||
"Let's calculate the metrics over all rolling forecasts:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.automl.core.shared import constants\n",
|
||||
"from azureml.automl.runtime.shared.score import scoring\n",
|
||||
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
|
||||
"from matplotlib import pyplot as plt\n",
|
||||
"from automl.client.core.common import constants\n",
|
||||
"\n",
|
||||
"# use automl metrics module\n",
|
||||
"scores = metrics.compute_metrics_regression(\n",
|
||||
" df_all['predicted'],\n",
|
||||
" df_all[target_column_name],\n",
|
||||
" list(constants.Metric.SCALAR_REGRESSION_SET),\n",
|
||||
" None, None, None)\n",
|
||||
"scores = scoring.score_regression(\n",
|
||||
" y_test=fcst_df[target_column_name],\n",
|
||||
" y_pred=fcst_df[\"predicted\"],\n",
|
||||
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(\"[Test data scores]\\n\")\n",
|
||||
"for key, value in scores.items(): \n",
|
||||
" print('{}: {:.3f}'.format(key, value))\n",
|
||||
" \n",
|
||||
"# Plot outputs\n",
|
||||
"%matplotlib inline\n",
|
||||
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
|
||||
"test_test = plt.scatter(df_all[target_column_name], df_all[target_column_name], color='g')\n",
|
||||
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
|
||||
"plt.show()"
|
||||
"for key, value in scores.items():\n",
|
||||
" print(\"{}: {:.3f}\".format(key, value))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The MAPE seems high; it is being skewed by an actual with a small absolute value. For a more informative evaluation, we can calculate the metrics by forecast horizon:"
|
||||
"For more details on what metrics are included and how they are calculated, please refer to [supported metrics](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#regressionforecasting-metrics). You could also calculate residuals, like described [here](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#residuals).\n",
|
||||
"\n",
|
||||
"The rolling forecast metric values are very high in comparison to the validation metrics reported by the AutoML job. What's going on here? We will investigate in the following cells!"
|
||||
]
|
||||
},
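As a quick illustration of the residuals mentioned above, computed with the same column names as the metrics cell:

```python
# Residuals: actual minus predicted; summary statistics give a quick
# sense of bias (mean far from zero) and spread
residuals = fcst_df[target_column_name] - fcst_df["predicted"]
residuals.describe()
```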
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Forecast versus actuals plot\n",
|
||||
"We will plot predictions and actuals on a time series plot. Since there are many forecasts for each date, we select the 14-day-ahead forecast from each forecast origin for our comparison."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -547,18 +706,36 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from metrics_helper import MAPE, APE\n",
|
||||
"df_all.groupby('horizon_origin').apply(\n",
|
||||
" lambda df: pd.Series({'MAPE': MAPE(df[target_column_name], df['predicted']),\n",
|
||||
" 'RMSE': np.sqrt(mean_squared_error(df[target_column_name], df['predicted'])),\n",
|
||||
" 'MAE': mean_absolute_error(df[target_column_name], df['predicted'])}))"
|
||||
"from matplotlib import pyplot as plt\n",
|
||||
"\n",
|
||||
"%matplotlib inline\n",
|
||||
"\n",
|
||||
"fcst_df_h14 = (\n",
|
||||
" fcst_df.groupby(\"forecast_origin\", as_index=False)\n",
|
||||
" .last()\n",
|
||||
" .drop(columns=[\"forecast_origin\"])\n",
|
||||
")\n",
|
||||
"fcst_df_h14.set_index(time_column_name, inplace=True)\n",
|
||||
"plt.plot(fcst_df_h14[[target_column_name, \"predicted\"]])\n",
|
||||
"plt.xticks(rotation=45)\n",
|
||||
"plt.title(f\"Predicted vs. Actuals\")\n",
|
||||
"plt.legend([\"actual\", \"14-day-ahead forecast\"])\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It's also interesting to see the distributions of APE (absolute percentage error) by horizon. On a log scale, the outlying APE in the horizon-3 group is clear."
|
||||
"Looking at the plot, there are two clear issues:\n",
|
||||
"1. An anomalously low count value on October 29th, 2012.\n",
|
||||
"2. End-of-year holidays (Thanksgiving and Christmas) in late November and late December.\n",
|
||||
"\n",
|
||||
"What happened on Oct. 29th, 2012? That day, Hurricane Sandy brought severe storm surge flooding to the east coast of the United States, particularly around New York City. This is certainly an anomalous event that the model did not account for!\n",
|
||||
"\n",
|
||||
"As for the late year holidays, the model apparently did not learn to account for the full reduction of bike share rentals on these major holidays. The training data covers 2011 and early 2012, so the model fit only had access to a single occurrence of these holidays. This makes it challenging to resolve holiday effects; however, a larger AutoML model search may result in a better model that is more holiday-aware.\n",
|
||||
"\n",
|
||||
"If we filter the predictions prior to the Thanksgiving holiday and remove the anomalous day of 2012-10-29, the metrics are closer to validation levels:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -567,24 +744,23 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"df_all_APE = df_all.assign(APE=APE(df_all[target_column_name], df_all['predicted']))\n",
|
||||
"APEs = [df_all_APE[df_all['horizon_origin'] == h].APE.values for h in range(1, max_horizon + 1)]\n",
|
||||
"date_filter = (fcst_df.date != \"2012-10-29\") & (fcst_df.date < \"2012-11-22\")\n",
|
||||
"scores = scoring.score_regression(\n",
|
||||
" y_test=fcst_df[date_filter][target_column_name],\n",
|
||||
" y_pred=fcst_df[date_filter][\"predicted\"],\n",
|
||||
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"%matplotlib inline\n",
|
||||
"plt.boxplot(APEs)\n",
|
||||
"plt.yscale('log')\n",
|
||||
"plt.xlabel('horizon')\n",
|
||||
"plt.ylabel('APE (%)')\n",
|
||||
"plt.title('Absolute Percentage Errors by Forecast Horizon')\n",
|
||||
"\n",
|
||||
"plt.show()"
|
||||
"print(\"[Test data scores (filtered)]\\n\")\n",
|
||||
"for key, value in scores.items():\n",
|
||||
" print(\"{}: {:.3f}\".format(key, value))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "erwright"
|
||||
"name": "jialiu"
|
||||
}
|
||||
],
|
||||
"category": "tutorial",
|
||||
@@ -604,10 +780,13 @@
|
||||
],
|
||||
"friendly_name": "Forecasting BikeShare Demand",
|
||||
"index_order": 1,
|
||||
"kernel_info": {
|
||||
"name": "python38-azureml"
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"display_name": "Python 3.8 - AzureML",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
"name": "python38-azureml"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -619,18 +798,31 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.7"
|
||||
"version": "3.8.10"
|
||||
},
|
||||
"microsoft": {
|
||||
"ms_spell_check": {
|
||||
"ms_spell_check_language": "en"
|
||||
}
|
||||
},
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"npconvert_exporter": "python",
|
||||
"nteract": {
|
||||
"version": "nteract-front-end@1.0.0"
|
||||
},
|
||||
"pygments_lexer": "ipython3",
|
||||
"tags": [
|
||||
"Forecasting"
|
||||
],
|
||||
"task": "Forecasting",
|
||||
"version": 3
|
||||
"version": 3,
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
name: auto-ml-forecasting-bike-share
|
||||
dependencies:
|
||||
- py-xgboost<=0.90
|
||||
- pip:
|
||||
- azureml-sdk
|
||||
- numpy==1.16.2
|
||||
- pandas==0.23.4
|
||||
- azureml-train-automl
|
||||
- azureml-widgets
|
||||
- matplotlib
|
||||
@@ -1,99 +0,0 @@
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from pandas.tseries.frequencies import to_offset
|
||||
|
||||
|
||||
def align_outputs(y_predicted, X_trans, X_test, y_test, target_column_name,
|
||||
predicted_column_name='predicted',
|
||||
horizon_colname='horizon_origin'):
|
||||
"""
|
||||
Demonstrates how to get the output aligned to the inputs
|
||||
using pandas indexes. Helps understand what happened if
|
||||
the output's shape differs from the input shape, or if
|
||||
the data got re-sorted by time and grain during forecasting.
|
||||
|
||||
Typical causes of misalignment are:
|
||||
* we predicted some periods that were missing in actuals -> drop from eval
|
||||
* model was asked to predict past max_horizon -> increase max horizon
|
||||
* data at start of X_test was needed for lags -> provide previous periods
|
||||
"""
|
||||
|
||||
if (horizon_colname in X_trans):
|
||||
df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
|
||||
horizon_colname: X_trans[horizon_colname]})
|
||||
else:
|
||||
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
|
||||
|
||||
# y and X outputs are aligned by forecast() function contract
|
||||
df_fcst.index = X_trans.index
|
||||
|
||||
# align original X_test to y_test
|
||||
X_test_full = X_test.copy()
|
||||
X_test_full[target_column_name] = y_test
|
||||
|
||||
# X_test_full's index does not include origin, so reset for merge
|
||||
df_fcst.reset_index(inplace=True)
|
||||
X_test_full = X_test_full.reset_index().drop(columns='index')
|
||||
together = df_fcst.merge(X_test_full, how='right')
|
||||
|
||||
# drop rows where prediction or actuals are nan
|
||||
# happens because of missing actuals
|
||||
# or at edges of time due to lags/rolling windows
|
||||
clean = together[together[[target_column_name,
|
||||
predicted_column_name]].notnull().all(axis=1)]
|
||||
return(clean)
|
||||
|
||||
|
||||
def do_rolling_forecast(fitted_model, X_test, y_test, target_column_name,
|
||||
time_column_name, max_horizon, freq='D'):
|
||||
"""
|
||||
Produce forecasts on a rolling origin over the given test set.
|
||||
|
||||
Each iteration makes a forecast for the next 'max_horizon' periods
|
||||
with respect to the current origin, then advances the origin by the
|
||||
horizon time duration. The prediction context for each forecast is set so
|
||||
that the forecaster uses the actual target values prior to the current
|
||||
origin time for constructing lag features.
|
||||
|
||||
This function returns a concatenated DataFrame of rolling forecasts.
|
||||
"""
|
||||
df_list = []
|
||||
origin_time = X_test[time_column_name].min()
|
||||
while origin_time <= X_test[time_column_name].max():
|
||||
# Set the horizon time - end date of the forecast
|
||||
horizon_time = origin_time + max_horizon * to_offset(freq)
|
||||
|
||||
# Extract test data from an expanding window up-to the horizon
|
||||
expand_wind = (X_test[time_column_name] < horizon_time)
|
||||
X_test_expand = X_test[expand_wind]
|
||||
y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
|
||||
y_query_expand.fill(np.NaN)
|
||||
|
||||
if origin_time != X_test[time_column_name].min():
|
||||
# Set the context by including actuals up-to the origin time
|
||||
test_context_expand_wind = (X_test[time_column_name] < origin_time)
|
||||
context_expand_wind = (
|
||||
X_test_expand[time_column_name] < origin_time)
|
||||
y_query_expand[context_expand_wind] = y_test[
|
||||
test_context_expand_wind]
|
||||
|
||||
# Make a forecast out to the maximum horizon
|
||||
y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)
|
||||
|
||||
# Align forecast with test set for dates within the
|
||||
# current rolling window
|
||||
trans_tindex = X_trans.index.get_level_values(time_column_name)
|
||||
trans_roll_wind = (trans_tindex >= origin_time) & (
|
||||
trans_tindex < horizon_time)
|
||||
test_roll_wind = expand_wind & (
|
||||
X_test[time_column_name] >= origin_time)
|
||||
df_list.append(align_outputs(y_fcst[trans_roll_wind],
|
||||
X_trans[trans_roll_wind],
|
||||
X_test[test_roll_wind],
|
||||
y_test[test_roll_wind],
|
||||
target_column_name))
|
||||
|
||||
# Advance the origin time
|
||||
origin_time = horizon_time
|
||||
|
||||
return pd.concat(df_list, ignore_index=True)
|
||||
@@ -1,55 +1,53 @@
|
||||
import argparse
|
||||
import azureml.train.automl
|
||||
from azureml.automl.runtime._vendor.automl.client.core.runtime import forecasting_models
|
||||
from azureml.core import Run
|
||||
from sklearn.externals import joblib
|
||||
import forecasting_helper
|
||||
|
||||
from azureml.core import Dataset, Run
|
||||
import joblib
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
'--max_horizon', type=int, dest='max_horizon',
|
||||
default=10, help='Max Horizon for forecasting')
|
||||
"--target_column_name",
|
||||
type=str,
|
||||
dest="target_column_name",
|
||||
help="Target Column Name",
|
||||
)
|
||||
parser.add_argument(
|
||||
'--target_column_name', type=str, dest='target_column_name',
|
||||
help='Target Column Name')
|
||||
parser.add_argument(
|
||||
'--time_column_name', type=str, dest='time_column_name',
|
||||
help='Time Column Name')
|
||||
parser.add_argument(
|
||||
'--frequency', type=str, dest='freq',
|
||||
help='Frequency of prediction')
|
||||
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
max_horizon = args.max_horizon
|
||||
target_column_name = args.target_column_name
|
||||
time_column_name = args.time_column_name
|
||||
freq = args.freq
|
||||
test_dataset_id = args.test_dataset
|
||||
|
||||
run = Run.get_context()
|
||||
# get input dataset by name
|
||||
test_dataset = run.input_datasets['test_data']
|
||||
ws = run.experiment.workspace
|
||||
|
||||
grain_column_names = []
|
||||
# get the input dataset by id
|
||||
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
|
||||
|
||||
df = test_dataset.to_pandas_dataframe().reset_index(drop=True)
|
||||
X_test_df = (
|
||||
test_dataset.drop_columns(columns=[target_column_name])
|
||||
.to_pandas_dataframe()
|
||||
.reset_index(drop=True)
|
||||
)
|
||||
y_test_df = (
|
||||
test_dataset.with_timestamp_columns(None)
|
||||
.keep_columns(columns=[target_column_name])
|
||||
.to_pandas_dataframe()
|
||||
)
|
||||
|
||||
X_test_df = test_dataset.drop_columns(columns=[target_column_name]).to_pandas_dataframe().reset_index(drop=True)
|
||||
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(columns=[target_column_name]).to_pandas_dataframe()
|
||||
fitted_model = joblib.load("model.pkl")
|
||||
|
||||
fitted_model = joblib.load('model.pkl')
|
||||
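# Sweep the forecast origin through the test set one period at a time (step=1),
# using test-set actuals prior to each origin to build lag features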
X_rf = fitted_model.rolling_forecast(X_test_df, y_test_df.values, step=1)
|
||||
|
||||
df_all = forecasting_helper.do_rolling_forecast(
|
||||
fitted_model,
|
||||
X_test_df,
|
||||
y_test_df.values.T[0],
|
||||
target_column_name,
|
||||
time_column_name,
|
||||
max_horizon,
|
||||
freq)
|
||||
# Rename the rolling forecast output columns to standard names for the forecast origin, predictions, and actuals
|
||||
assign_dict = {
|
||||
fitted_model.forecast_origin_column_name: "forecast_origin",
|
||||
fitted_model.forecast_column_name: "predicted",
|
||||
fitted_model.actual_column_name: target_column_name,
|
||||
}
|
||||
X_rf.rename(columns=assign_dict, inplace=True)
|
||||
|
||||
file_name = 'outputs/predictions.csv'
|
||||
export_csv = df_all.to_csv(file_name, header=True)
|
||||
file_name = "outputs/predictions.csv"
|
||||
export_csv = X_rf.to_csv(file_name, header=True)
|
||||
|
||||
# Upload the predictions into artifacts
|
||||
run.upload_file(name=file_name, path_or_stream=file_name)
|
||||
|
||||
@@ -1,41 +1,40 @@
|
||||
from azureml.core import Environment
|
||||
from azureml.core.conda_dependencies import CondaDependencies
|
||||
from azureml.train.estimator import Estimator
|
||||
from azureml.core.run import Run
|
||||
from azureml.core import ScriptRunConfig
|
||||
|
||||
|
||||
def run_rolling_forecast(test_experiment, compute_target, train_run, test_dataset,
|
||||
max_horizon, target_column_name, time_column_name,
|
||||
freq='D', inference_folder='./forecast'):
|
||||
condafile = inference_folder + '/condafile.yml'
|
||||
train_run.download_file('outputs/model.pkl',
|
||||
inference_folder + '/model.pkl')
|
||||
train_run.download_file('outputs/conda_env_v_1_0_0.yml', condafile)
|
||||
def run_rolling_forecast(
|
||||
test_experiment,
|
||||
compute_target,
|
||||
train_run,
|
||||
test_dataset,
|
||||
target_column_name,
|
||||
inference_folder="./forecast",
|
||||
):
|
||||
train_run.download_file("outputs/model.pkl", inference_folder + "/model.pkl")
|
||||
|
||||
inference_env = Environment("myenv")
|
||||
inference_env.docker.enabled = True
|
||||
inference_env.python.conda_dependencies = CondaDependencies(
|
||||
conda_dependencies_file_path=condafile)
|
||||
inference_env = train_run.get_environment()
|
||||
|
||||
est = Estimator(source_directory=inference_folder,
|
||||
entry_script='forecasting_script.py',
|
||||
script_params={
|
||||
'--max_horizon': max_horizon,
|
||||
'--target_column_name': target_column_name,
|
||||
'--time_column_name': time_column_name,
|
||||
'--frequency': freq
|
||||
},
|
||||
inputs=[test_dataset.as_named_input('test_data')],
|
||||
compute_target=compute_target,
|
||||
environment_definition=inference_env)
|
||||
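# Configure the remote inference run: the forecasting script, its CLI
# arguments, the compute target, and the training run's environment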
config = ScriptRunConfig(
|
||||
source_directory=inference_folder,
|
||||
script="forecasting_script.py",
|
||||
arguments=[
|
||||
"--target_column_name",
|
||||
target_column_name,
|
||||
"--test_dataset",
|
||||
test_dataset.as_named_input(test_dataset.name),
|
||||
],
|
||||
compute_target=compute_target,
|
||||
environment=inference_env,
|
||||
)
|
||||
|
||||
run = test_experiment.submit(est,
|
||||
tags={
|
||||
'training_run_id': train_run.id,
|
||||
'run_algorithm': train_run.properties['run_algorithm'],
|
||||
'valid_score': train_run.properties['score'],
|
||||
'primary_metric': train_run.properties['primary_metric']
|
||||
})
|
||||
run = test_experiment.submit(
|
||||
config,
|
||||
tags={
|
||||
"training_run_id": train_run.id,
|
||||
"run_algorithm": train_run.properties["run_algorithm"],
|
||||
"valid_score": train_run.properties["score"],
|
||||
"primary_metric": train_run.properties["primary_metric"],
|
||||
},
|
||||
)
|
||||
|
||||
run.log("run_algorithm", run.tags['run_algorithm'])
|
||||
run.log("run_algorithm", run.tags["run_algorithm"])
|
||||
return run
|
||||
|
||||
@@ -16,6 +16,13 @@
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<font color=\"red\" size=\"5\"><strong>!Important!</strong> </br>This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/blob/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-task-energy-demand/automl-forecasting-task-energy-demand-advanced-mlflow.ipynb)).</font>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -24,10 +31,11 @@
|
||||
"_**Forecasting using the Energy Demand Dataset**_\n",
|
||||
"\n",
|
||||
"## Contents\n",
|
||||
"1. [Introduction](#Introduction)\n",
|
||||
"1. [Setup](#Setup)\n",
|
||||
"1. [Data and Forecasting Configurations](#Data)\n",
|
||||
"1. [Train](#Train)\n",
|
||||
"1. [Introduction](#introduction)\n",
|
||||
"1. [Setup](#setup)\n",
|
||||
"1. [Data and Forecasting Configurations](#data)\n",
|
||||
"1. [Train](#train)\n",
|
||||
"1. [Generate and Evaluate the Forecast](#forecast)\n",
|
||||
"\n",
|
||||
"Advanced Forecasting\n",
|
||||
"1. [Advanced Training](#advanced_training)\n",
|
||||
@@ -38,26 +46,27 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Introduction\n",
|
||||
"# Introduction<a id=\"introduction\"></a>\n",
|
||||
"\n",
|
||||
"In this example we use the associated New York City energy demand dataset to showcase how you can use AutoML for a simple forecasting problem and explore the results. The goal is predict the energy demand for the next 48 hours based on historic time-series data.\n",
|
||||
"\n",
|
||||
"If you are using an Azure Machine Learning [Notebook VM](https://docs.microsoft.com/en-us/azure/machine-learning/service/tutorial-1st-experiment-sdk-setup), you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) first, if you haven't already, to establish your connection to the AzureML Workspace.\n",
|
||||
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) first, if you haven't already, to establish your connection to the AzureML Workspace.\n",
|
||||
"\n",
|
||||
"In this notebook you will learn how to:\n",
|
||||
"1. Creating an Experiment using an existing Workspace\n",
|
||||
"1. Configure AutoML using 'AutoMLConfig'\n",
|
||||
"1. Train the model using AmlCompute\n",
|
||||
"1. Explore the engineered features and results\n",
|
||||
"1. Generate the forecast and compute the out-of-sample accuracy metrics\n",
|
||||
"1. Configuration and remote run of AutoML for a time-series model with lag and rolling window features\n",
|
||||
"1. Run and explore the forecast"
|
||||
"1. Run and explore the forecast with lagging features"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup"
|
||||
"# Setup<a id=\"setup\"></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -66,6 +75,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\n",
|
||||
@@ -88,7 +98,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -97,7 +107,6 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.4.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
@@ -117,7 +126,7 @@
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# choose a name for the run history container in the workspace\n",
|
||||
"experiment_name = 'automl-forecasting-energydemand'\n",
|
||||
"experiment_name = \"automl-forecasting-energydemand\"\n",
|
||||
"\n",
|
||||
"# # project folder\n",
|
||||
"# project_folder = './sample_projects/automl-forecasting-energy-demand'\n",
|
||||
@@ -125,13 +134,14 @@
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Run History Name'] = experiment_name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"Run History Name\"] = experiment_name\n",
|
||||
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
@@ -164,10 +174,11 @@
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
" print(\"Found existing cluster, use it.\")\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
||||
" max_nodes=6)\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
|
||||
" )\n",
|
||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
@@ -177,7 +188,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Data\n",
|
||||
"# Data<a id=\"data\"></a>\n",
|
||||
"\n",
|
||||
"We will use energy consumption [data from New York City](http://mis.nyiso.com/public/P-58Blist.htm) for model training. The data is stored in a tabular format and includes energy demand and basic weather data at an hourly frequency. \n",
|
||||
"\n",
|
||||
@@ -202,8 +213,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"target_column_name = 'demand'\n",
|
||||
"time_column_name = 'timeStamp'"
|
||||
"target_column_name = \"demand\"\n",
|
||||
"time_column_name = \"timeStamp\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -212,7 +223,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"dataset = Dataset.Tabular.from_delimited_files(path = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\").with_timestamp_columns(fine_grain_timestamp=time_column_name) \n",
|
||||
"dataset = Dataset.Tabular.from_delimited_files(\n",
|
||||
" path=\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\"\n",
|
||||
").with_timestamp_columns(fine_grain_timestamp=time_column_name)\n",
|
||||
"dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
|
||||
]
|
||||
},
|
||||
@@ -254,8 +267,12 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# split into train based on time\n",
|
||||
"train = dataset.time_before(datetime(2017, 8, 8, 5), include_boundary=True)\n",
|
||||
"train.to_pandas_dataframe().reset_index(drop=True).sort_values(time_column_name).tail(5)"
|
||||
"train = (\n",
|
||||
" dataset.time_before(datetime(2017, 8, 8, 5), include_boundary=True)\n",
|
||||
" .to_pandas_dataframe()\n",
|
||||
" .reset_index(drop=True)\n",
|
||||
")\n",
|
||||
"train.sort_values(time_column_name).tail(5)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -265,8 +282,39 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# split into test based on time\n",
|
||||
"test = dataset.time_between(datetime(2017, 8, 8, 6), datetime(2017, 8, 10, 5))\n",
|
||||
"test.to_pandas_dataframe().reset_index(drop=True).head(5)"
|
||||
"test = (\n",
|
||||
" dataset.time_between(datetime(2017, 8, 8, 6), datetime(2017, 8, 10, 5))\n",
|
||||
" .to_pandas_dataframe()\n",
|
||||
" .reset_index(drop=True)\n",
|
||||
")\n",
|
||||
"test.head(5)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# register the splitted train and test data in workspace storage\n",
|
||||
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||
"\n",
|
||||
"datastore = ws.get_default_datastore()\n",
|
||||
"train_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||
" train, target=(datastore, \"dataset/\"), name=\"nyc_energy_train\"\n",
|
||||
")\n",
|
||||
"test_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||
" test, target=(datastore, \"dataset/\"), name=\"nyc_energy_test\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -288,37 +336,51 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"max_horizon = 48"
|
||||
"forecast_horizon = 48"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train\n",
|
||||
"## Forecasting Parameters\n",
|
||||
"To define forecasting parameters for your experiment training, you can leverage the ForecastingParameters class. The table below details the forecasting parameter we will be passing into our experiment.\n",
|
||||
"\n",
|
||||
"Instantiate an AutoMLConfig object. This config defines the settings and data used to run the experiment. We can provide extra configurations within 'automl_settings', for this forecasting task we add the name of the time column and the maximum forecast horizon.\n",
|
||||
"|Property|Description|\n",
|
||||
"|-|-|\n",
|
||||
"|**time_column_name**|The name of your time column.|\n",
|
||||
"|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
|
||||
"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.\n",
|
||||
"|**cv_step_size**|Number of periods between two consecutive cross-validation folds. The default value is \"auto\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value."
|
||||
]
|
||||
},
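For a quick preview of how these settings fit together, a minimal sketch is shown below; the complete configuration, including the AutoMLConfig, appears in the Train section.

```python
from azureml.automl.core.forecasting_parameters import ForecastingParameters

forecasting_parameters = ForecastingParameters(
    time_column_name=time_column_name,  # "timeStamp"
    forecast_horizon=forecast_horizon,  # 48 hourly periods ahead
    freq="H",                           # hourly frequency
    cv_step_size="auto",                # let AutoML pick the CV step size
)
```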
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Train<a id=\"train\"></a>\n",
|
||||
"\n",
|
||||
"Instantiate an AutoMLConfig object. This config defines the settings and data used to run the experiment. We can provide extra configurations within 'automl_settings', for this forecasting task we add the forecasting parameters to hold all the additional forecasting parameters.\n",
|
||||
"\n",
|
||||
"|Property|Description|\n",
|
||||
"|-|-|\n",
|
||||
"|**task**|forecasting|\n",
|
||||
"|**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>|\n",
|
||||
"|**blacklist_models**|Models in blacklist won't be used by AutoML. All supported models can be found at [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.constants.supportedmodels.forecasting?view=azure-ml-py).|\n",
|
||||
"|**blocked_models**|Models in blocked_models won't be used by AutoML. All supported models can be found at [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.constants.supportedmodels.forecasting?view=azure-ml-py).|\n",
|
||||
"|**experiment_timeout_hours**|Maximum amount of time in hours that the experiment take before it terminates.|\n",
|
||||
"|**training_data**|The training data to be used within the experiment.|\n",
|
||||
"|**label_column_name**|The name of the label column.|\n",
|
||||
"|**compute_target**|The remote compute for training.|\n",
|
||||
"|**n_cross_validations**|Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way.|\n",
|
||||
"|**n_cross_validations**|Number of cross-validation folds to use for model/pipeline selection. The default value is \"auto\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value.\n",
|
||||
"|**enable_early_stopping**|Flag to enble early termination if the score is not improving in the short term.|\n",
|
||||
"|**time_column_name**|The name of your time column.|\n",
|
||||
"|**max_horizon**|The number of periods out you would like to predict past your training data. Periods are inferred from your data.|\n"
|
||||
"|**forecasting_parameters**|A class holds all the forecasting related parameters.|\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook uses the blacklist_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blacklist_models list but you may need to increase the experiment_timeout_hours parameter value to get results."
|
||||
"This notebook uses the blocked_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blocked_models list but you may need to increase the experiment_timeout_hours parameter value to get results."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -327,22 +389,28 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_settings = {\n",
|
||||
" 'time_column_name': time_column_name,\n",
|
||||
" 'max_horizon': max_horizon,\n",
|
||||
"}\n",
|
||||
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task='forecasting', \n",
|
||||
" primary_metric='normalized_root_mean_squared_error',\n",
|
||||
" blacklist_models = ['ExtremeRandomTrees', 'AutoArima', 'Prophet'], \n",
|
||||
" experiment_timeout_hours=0.3,\n",
|
||||
" training_data=train,\n",
|
||||
" label_column_name=target_column_name,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" enable_early_stopping=True,\n",
|
||||
" n_cross_validations=3, \n",
|
||||
" verbosity=logging.INFO,\n",
|
||||
" **automl_settings)"
|
||||
"forecasting_parameters = ForecastingParameters(\n",
|
||||
" time_column_name=time_column_name,\n",
|
||||
" forecast_horizon=forecast_horizon,\n",
|
||||
" freq=\"H\", # Set the forecast frequency to be hourly\n",
|
||||
" cv_step_size=\"auto\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(\n",
|
||||
" task=\"forecasting\",\n",
|
||||
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||
" blocked_models=[\"ExtremeRandomTrees\", \"AutoArima\", \"Prophet\"],\n",
|
||||
" experiment_timeout_hours=0.3,\n",
|
||||
" training_data=train_dataset,\n",
|
||||
" label_column_name=target_column_name,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" enable_early_stopping=True,\n",
|
||||
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||
" verbosity=logging.INFO,\n",
|
||||
" forecasting_parameters=forecasting_parameters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -362,15 +430,6 @@
|
||||
"remote_run = experiment.submit(automl_config, show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -384,8 +443,8 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retrieve the Best Model\n",
|
||||
"Below we select the best model from all the training iterations using get_output method."
|
||||
"## Retrieve the Best Run details\n",
|
||||
"Below we retrieve the best Run object from among all the runs in the experiment."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -394,8 +453,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"best_run, fitted_model = remote_run.get_output()\n",
|
||||
"fitted_model.steps"
|
||||
"best_run = remote_run.get_best_child()\n",
|
||||
"best_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -403,7 +462,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Featurization\n",
|
||||
"You can access the engineered feature names generated in time-series featurization."
|
||||
"We can look at the engineered feature names generated in time-series featurization via. the JSON file named 'engineered_feature_names.json' under the run outputs."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -412,7 +471,14 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()"
|
||||
"# Download the JSON file locally\n",
|
||||
"best_run.download_file(\n",
|
||||
" \"outputs/engineered_feature_names.json\", \"engineered_feature_names.json\"\n",
|
||||
")\n",
|
||||
"with open(\"engineered_feature_names.json\", \"r\") as f:\n",
|
||||
" records = json.load(f)\n",
|
||||
"\n",
|
||||
"records"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -435,37 +501,37 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get the featurization summary as a list of JSON\n",
|
||||
"featurization_summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()\n",
|
||||
"# View the featurization summary as a pandas dataframe\n",
|
||||
"pd.DataFrame.from_records(featurization_summary)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Forecasting\n",
|
||||
"# Download the featurization summary JSON file locally\n",
|
||||
"best_run.download_file(\n",
|
||||
" \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"Now that we have retrieved the best pipeline/model, it can be used to make predictions on test data. First, we remove the target values from the test set:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"X_test = test.to_pandas_dataframe().reset_index(drop=True)\n",
|
||||
"y_test = X_test.pop(target_column_name).values"
|
||||
"# Render the JSON as a pandas DataFrame\n",
|
||||
"with open(\"featurization_summary.json\", \"r\") as f:\n",
|
||||
" records = json.load(f)\n",
|
||||
"fs = pd.DataFrame.from_records(records)\n",
|
||||
"\n",
|
||||
"# View a summary of the featurization\n",
|
||||
"fs[\n",
|
||||
" [\n",
|
||||
" \"RawFeatureName\",\n",
|
||||
" \"TypeDetected\",\n",
|
||||
" \"Dropped\",\n",
|
||||
" \"EngineeredFeatureCount\",\n",
|
||||
" \"Transformations\",\n",
|
||||
" ]\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Forecast Function\n",
|
||||
"For forecasting, we will use the forecast function instead of the predict function. Using the predict method would result in getting predictions for EVERY horizon the forecaster can predict at. This is useful when training and evaluating the performance of the forecaster at various horizons, but the level of detail is excessive for normal use. Forecast function also can handle more complicated scenarios, see notebook on [high frequency forecasting](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.ipynb)."
|
||||
"# Forecasting<a id=\"forecast\"></a>\n",
|
||||
"\n",
|
||||
"Now that we have retrieved the best pipeline/model, it can be used to make predictions on test data. We will do batch scoring on the test dataset which should have the same schema as training dataset.\n",
|
||||
"\n",
|
||||
"The inference will run on a remote compute. In this example, it will re-use the training compute."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -474,10 +540,36 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# The featurized data, aligned to y, will also be returned.\n",
|
||||
"# This contains the assumptions that were made in the forecast\n",
|
||||
"# and helps align the forecast to the original data\n",
|
||||
"y_predictions, X_trans = fitted_model.forecast(X_test)"
|
||||
"test_experiment = Experiment(ws, experiment_name + \"_inference\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retrieving forecasts from the model\n",
|
||||
"We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from run_forecast import run_remote_inference\n",
|
||||
"\n",
|
||||
"remote_run_infer = run_remote_inference(\n",
|
||||
" test_experiment=test_experiment,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" train_run=best_run,\n",
|
||||
" test_dataset=test_dataset,\n",
|
||||
" target_column_name=target_column_name,\n",
|
||||
")\n",
|
||||
"remote_run_infer.wait_for_completion(show_output=False)\n",
|
||||
"\n",
|
||||
"# download the inference output file to the local machine\n",
|
||||
"remote_run_infer.download_file(\"outputs/predictions.csv\", \"predictions.csv\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -485,9 +577,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Evaluate\n",
|
||||
"To evaluate the accuracy of the forecast, we'll compare against the actual sales quantities for some select metrics, included the mean absolute percentage error (MAPE).\n",
|
||||
"\n",
|
||||
"It is a good practice to always align the output explicitly to the input, as the count and order of the rows may have changed during transformations that span multiple rows."
|
||||
"To evaluate the accuracy of the forecast, we'll compare against the actual sales quantities for some select metrics, included the mean absolute percentage error (MAPE). For more metrics that can be used for evaluation after training, please see [supported metrics](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#regressionforecasting-metrics), and [how to calculate residuals](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#residuals)."
|
||||
]
|
||||
},
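For reference, the standard definition of MAPE over $n$ observations is shown below; note that actuals $y_t$ close to zero can inflate it sharply.

```latex
\mathrm{MAPE} = \frac{100}{n} \sum_{t=1}^{n} \left| \frac{y_t - \hat{y}_t}{y_t} \right|
```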
|
||||
{
|
||||
@@ -496,9 +586,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from forecasting_helper import align_outputs\n",
|
||||
"\n",
|
||||
"df_all = align_outputs(y_predictions, X_trans, X_test, y_test, target_column_name)"
|
||||
"# load forecast data frame\n",
|
||||
"fcst_df = pd.read_csv(\"predictions.csv\", parse_dates=[time_column_name])\n",
|
||||
"fcst_df.head()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -507,26 +597,30 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.automl.core._vendor.automl.client.core.common import metrics\n",
|
||||
"from azureml.automl.core.shared import constants\n",
|
||||
"from azureml.automl.runtime.shared.score import scoring\n",
|
||||
"from matplotlib import pyplot as plt\n",
|
||||
"from automl.client.core.common import constants\n",
|
||||
"\n",
|
||||
"# use automl metrics module\n",
|
||||
"scores = metrics.compute_metrics_regression(\n",
|
||||
" df_all['predicted'],\n",
|
||||
" df_all[target_column_name],\n",
|
||||
" list(constants.Metric.SCALAR_REGRESSION_SET),\n",
|
||||
" None, None, None)\n",
|
||||
"scores = scoring.score_regression(\n",
|
||||
" y_test=fcst_df[target_column_name],\n",
|
||||
" y_pred=fcst_df[\"predicted\"],\n",
|
||||
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(\"[Test data scores]\\n\")\n",
|
||||
"for key, value in scores.items(): \n",
|
||||
" print('{}: {:.3f}'.format(key, value))\n",
|
||||
" \n",
|
||||
"for key, value in scores.items():\n",
|
||||
" print(\"{}: {:.3f}\".format(key, value))\n",
|
||||
"\n",
|
||||
"# Plot outputs\n",
|
||||
"%matplotlib inline\n",
|
||||
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
|
||||
"test_test = plt.scatter(df_all[target_column_name], df_all[target_column_name], color='g')\n",
|
||||
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
|
||||
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df[\"predicted\"], color=\"b\")\n",
|
||||
"test_test = plt.scatter(\n",
|
||||
" fcst_df[target_column_name], fcst_df[target_column_name], color=\"g\"\n",
|
||||
")\n",
|
||||
"plt.legend(\n",
|
||||
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
|
||||
")\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
@@ -534,24 +628,8 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at `X_trans` is also useful to see what featurization happened to the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"X_trans"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Advanced Training <a id=\"advanced_training\"></a>\n",
|
||||
"We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, grain and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation."
|
||||
"# Advanced Training <a id=\"advanced_training\"></a>\n",
|
||||
"We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, time series identifier columns and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -559,9 +637,9 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using lags and rolling window features\n",
|
||||
"Now we will configure the target lags, that is the previous values of the target variables, meaning the prediction is no longer horizon-less. We therefore must still specify the `max_horizon` that the model will learn to forecast. The `target_lags` keyword specifies how far back we will construct the lags of the target variable, and the `target_rolling_window_size` specifies the size of the rolling window over which we will generate the `max`, `min` and `sum` features.\n",
|
||||
"Now we will configure the target lags, that is the previous values of the target variables, meaning the prediction is no longer horizon-less. We therefore must still specify the `forecast_horizon` that the model will learn to forecast. The `target_lags` keyword specifies how far back we will construct the lags of the target variable, and the `target_rolling_window_size` specifies the size of the rolling window over which we will generate the `max`, `min` and `sum` features.\n",
|
||||
"\n",
|
||||
"This notebook uses the blacklist_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blacklist_models list but you may need to increase the iteration_timeout_minutes parameter value to get results."
|
||||
"This notebook uses the blocked_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blocked_models list but you may need to increase the iteration_timeout_minutes parameter value to get results."
|
||||
]
|
||||
},
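To make the lag and rolling-window features concrete, here is a small pandas sketch of what such features look like on a toy target series; AutoML generates these internally, so this is illustration only.

```python
import pandas as pd

s = pd.Series(range(20), name="demand", dtype="float")
features = pd.DataFrame(
    {
        "demand": s,
        "lag_12": s.shift(12),                     # target_lags=12: value 12 periods back
        "roll4_min": s.shift(1).rolling(4).min(),  # rolling window of size 4,
        "roll4_max": s.shift(1).rolling(4).max(),  # computed on past values only
        "roll4_sum": s.shift(1).rolling(4).sum(),
    }
)
features.tail()
```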
|
||||
{
|
||||
@@ -570,24 +648,35 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_advanced_settings = {\n",
|
||||
" 'time_column_name': time_column_name,\n",
|
||||
" 'max_horizon': max_horizon,\n",
|
||||
" 'target_lags': 12,\n",
|
||||
" 'target_rolling_window_size': 4,\n",
|
||||
"}\n",
|
||||
"advanced_forecasting_parameters = ForecastingParameters(\n",
|
||||
" time_column_name=time_column_name,\n",
|
||||
" forecast_horizon=forecast_horizon,\n",
|
||||
" target_lags=12,\n",
|
||||
" target_rolling_window_size=4,\n",
|
||||
" cv_step_size=\"auto\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task='forecasting', \n",
|
||||
" primary_metric='normalized_root_mean_squared_error',\n",
|
||||
" blacklist_models = ['ElasticNet','ExtremeRandomTrees','GradientBoosting','XGBoostRegressor','ExtremeRandomTrees', 'AutoArima', 'Prophet'], #These models are blacklisted for tutorial purposes, remove this for real use cases. \n",
|
||||
" experiment_timeout_hours=0.3,\n",
|
||||
" training_data=train,\n",
|
||||
" label_column_name=target_column_name,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" enable_early_stopping = True,\n",
|
||||
" n_cross_validations=3, \n",
|
||||
" verbosity=logging.INFO,\n",
|
||||
" **automl_advanced_settings)"
|
||||
"automl_config = AutoMLConfig(\n",
|
||||
" task=\"forecasting\",\n",
|
||||
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||
" blocked_models=[\n",
|
||||
" \"ElasticNet\",\n",
|
||||
" \"ExtremeRandomTrees\",\n",
|
||||
" \"GradientBoosting\",\n",
|
||||
" \"XGBoostRegressor\",\n",
|
||||
" \"ExtremeRandomTrees\",\n",
|
||||
" \"AutoArima\",\n",
|
||||
" \"Prophet\",\n",
|
||||
" ], # These models are blocked for tutorial purposes, remove this for real use cases.\n",
|
||||
" experiment_timeout_hours=0.3,\n",
|
||||
" training_data=train_dataset,\n",
|
||||
" label_column_name=target_column_name,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" enable_early_stopping=True,\n",
|
||||
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||
" verbosity=logging.INFO,\n",
|
||||
" forecasting_parameters=advanced_forecasting_parameters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -619,7 +708,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retrieve the Best Model"
|
||||
"### Retrieve the Best Run details"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -628,15 +717,16 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"best_run_lags, fitted_model_lags = advanced_remote_run.get_output()"
|
||||
"best_run_lags = remote_run.get_best_child()\n",
|
||||
"best_run_lags"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Advanced Results<a id=\"advanced_results\"></a>\n",
|
||||
"We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, grain and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation."
|
||||
"# Advanced Results<a id=\"advanced_results\"></a>\n",
|
||||
"We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, time series identifier columns and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -645,21 +735,21 @@
"metadata": {},
"outputs": [],
"source": [
"# The featurized data, aligned to y, will also be returned.\n",
"# This contains the assumptions that were made in the forecast\n",
"# and helps align the forecast to the original data\n",
"y_predictions, X_trans = fitted_model_lags.forecast(X_test)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from forecasting_helper import align_outputs\n",
"test_experiment_advanced = Experiment(ws, experiment_name + \"_inference_advanced\")\n",
"advanced_remote_run_infer = run_remote_inference(\n",
"    test_experiment=test_experiment_advanced,\n",
"    compute_target=compute_target,\n",
"    train_run=best_run_lags,\n",
"    test_dataset=test_dataset,\n",
"    target_column_name=target_column_name,\n",
"    inference_folder=\"./forecast_advanced\",\n",
")\n",
"advanced_remote_run_infer.wait_for_completion(show_output=False)\n",
"\n",
"df_all = align_outputs(y_predictions, X_trans, X_test, y_test, target_column_name)"
"# download the inference output file to the local machine\n",
"advanced_remote_run_infer.download_file(\n",
"    \"outputs/predictions.csv\", \"predictions_advanced.csv\"\n",
")"
]
},
{
@@ -668,26 +758,42 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core._vendor.automl.client.core.common import metrics\n",
"fcst_adv_df = pd.read_csv(\"predictions_advanced.csv\", parse_dates=[time_column_name])\n",
"fcst_adv_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared import constants\n",
"from azureml.automl.runtime.shared.score import scoring\n",
"from matplotlib import pyplot as plt\n",
"from automl.client.core.common import constants\n",
"\n",
"# use automl metrics module\n",
"scores = metrics.compute_metrics_regression(\n",
"    df_all['predicted'],\n",
"    df_all[target_column_name],\n",
"    list(constants.Metric.SCALAR_REGRESSION_SET),\n",
"    None, None, None)\n",
"scores = scoring.score_regression(\n",
"    y_test=fcst_adv_df[target_column_name],\n",
"    y_pred=fcst_adv_df[\"predicted\"],\n",
"    metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items():\n",
"    print('{}: {:.3f}'.format(key, value))\n",
"\n",
"for key, value in scores.items():\n",
"    print(\"{}: {:.3f}\".format(key, value))\n",
"\n",
"# Plot outputs\n",
"%matplotlib inline\n",
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
"test_test = plt.scatter(df_all[target_column_name], df_all[target_column_name], color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"test_pred = plt.scatter(\n",
"    fcst_adv_df[target_column_name], fcst_adv_df[\"predicted\"], color=\"b\"\n",
")\n",
"test_test = plt.scatter(\n",
"    fcst_adv_df[target_column_name], fcst_adv_df[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
"    (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()"
]
}
@@ -695,17 +801,20 @@
"metadata": {
"authors": [
{
"name": "erwright"
"name": "jialiu"
}
],
"categories": [
"how-to-use-azureml",
"automated-machine-learning"
],
"kernel_info": {
"name": "python3"
},
"kernelspec": {
"display_name": "Python 3.6",
"display_name": "Python 3.8 - AzureML",
"language": "python",
"name": "python36"
"name": "python38-azureml"
},
"language_info": {
"codemirror_mode": {
@@ -717,9 +826,22 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
"version": "3.8.10"
},
"microsoft": {
"ms_spell_check": {
"ms_spell_check_language": "en"
}
},
"nteract": {
"version": "nteract-front-end@1.0.0"
},
"vscode": {
"interpreter": {
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}
@@ -1,9 +0,0 @@
name: auto-ml-forecasting-energy-demand
dependencies:
- pip:
  - azureml-sdk
  - numpy==1.16.2
  - pandas==0.23.4
  - azureml-train-automl
  - azureml-widgets
  - matplotlib
@@ -1,44 +0,0 @@
import pandas as pd
import numpy as np
from pandas.tseries.frequencies import to_offset


def align_outputs(y_predicted, X_trans, X_test, y_test, target_column_name,
                  predicted_column_name='predicted',
                  horizon_colname='horizon_origin'):
    """
    Demonstrates how to get the output aligned to the inputs
    using pandas indexes. Helps understand what happened if
    the output's shape differs from the input shape, or if
    the data got re-sorted by time and grain during forecasting.

    Typical causes of misalignment are:
    * we predicted some periods that were missing in actuals -> drop from eval
    * model was asked to predict past max_horizon -> increase max horizon
    * data at start of X_test was needed for lags -> provide previous periods
    """

    if (horizon_colname in X_trans):
        df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
                                horizon_colname: X_trans[horizon_colname]})
    else:
        df_fcst = pd.DataFrame({predicted_column_name: y_predicted})

    # y and X outputs are aligned by forecast() function contract
    df_fcst.index = X_trans.index

    # align original X_test to y_test
    X_test_full = X_test.copy()
    X_test_full[target_column_name] = y_test

    # X_test_full's index does not include origin, so reset for merge
    df_fcst.reset_index(inplace=True)
    X_test_full = X_test_full.reset_index().drop(columns='index')
    together = df_fcst.merge(X_test_full, how='right')

    # drop rows where prediction or actuals are nan
    # happens because of missing actuals
    # or at edges of time due to lags/rolling windows
    clean = together[together[[target_column_name,
                               predicted_column_name]].notnull().all(axis=1)]
    return clean
@@ -0,0 +1,61 @@
"""
This is the script that is executed on the compute instance. It relies
on the model.pkl file which is uploaded along with this script to the
compute instance.
"""

import argparse
from azureml.core import Dataset, Run
import joblib
from pandas.tseries.frequencies import to_offset

parser = argparse.ArgumentParser()
parser.add_argument(
    "--target_column_name",
    type=str,
    dest="target_column_name",
    help="Target Column Name",
)
parser.add_argument(
    "--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)

args = parser.parse_args()
target_column_name = args.target_column_name
test_dataset_id = args.test_dataset

run = Run.get_context()
ws = run.experiment.workspace

# get the input dataset by id
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)

X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
y_test = X_test.pop(target_column_name).values

# generate forecast
fitted_model = joblib.load("model.pkl")
# The default quantiles below give a 95% prediction interval
# (the 2.5th and 97.5th percentiles) around the median forecast.
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
    lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[
    X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]

file_name = "outputs/predictions.csv"
clean.to_csv(file_name, header=True, index=False)

# Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name)
@@ -1,22 +0,0 @@
import pandas as pd
import numpy as np


def APE(actual, pred):
    """
    Calculate absolute percentage error.
    Returns a vector of APE values with same length as actual/pred.
    """
    return 100 * np.abs((actual - pred) / actual)


def MAPE(actual, pred):
    """
    Calculate mean absolute percentage error.
    Remove NA and values where actual is close to zero.
    """
    not_na = ~(np.isnan(actual) | np.isnan(pred))
    not_zero = ~np.isclose(actual, 0.0)
    actual_safe = actual[not_na & not_zero]
    pred_safe = pred[not_na & not_zero]
    return np.mean(APE(actual_safe, pred_safe))
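A quick check of these helpers with made-up numbers (illustrative only): NaN pairs and near-zero actuals are dropped before averaging, exactly as the docstring states.

```python
import numpy as np

actual = np.array([100.0, 0.0, 110.0, np.nan, 120.0])
pred = np.array([90.0, 5.0, 121.0, 118.0, 126.0])

# Only the (100, 90), (110, 121) and (120, 126) pairs survive filtering:
# APE values 10%, 10% and 5%, so MAPE is 25/3 = 8.33...
print(MAPE(actual, pred))
```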
@@ -0,0 +1,49 @@
import os
import shutil
from azureml.core import ScriptRunConfig


def run_remote_inference(
    test_experiment,
    compute_target,
    train_run,
    test_dataset,
    target_column_name,
    inference_folder="./forecast",
):
    # Create local directory to copy the model.pkl and forecasting_script.py files into.
    # These files will be uploaded to and executed on the compute instance.
    os.makedirs(inference_folder, exist_ok=True)
    shutil.copy("forecasting_script.py", inference_folder)

    train_run.download_file(
        "outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
    )

    inference_env = train_run.get_environment()

    config = ScriptRunConfig(
        source_directory=inference_folder,
        script="forecasting_script.py",
        arguments=[
            "--target_column_name",
            target_column_name,
            "--test_dataset",
            test_dataset.as_named_input(test_dataset.name),
        ],
        compute_target=compute_target,
        environment=inference_env,
    )

    run = test_experiment.submit(
        config,
        tags={
            "training_run_id": train_run.id,
            "run_algorithm": train_run.properties["run_algorithm"],
            "valid_score": train_run.properties["score"],
            "primary_metric": train_run.properties["primary_metric"],
        },
    )

    run.log("run_algorithm", run.tags["run_algorithm"])
    return run
@@ -24,7 +24,7 @@
"metadata": {},
"source": [
"## Introduction\n",
"This notebook demonstrates the full interface to the `forecast()` function. \n",
"This notebook demonstrates the full interface of the `forecast()` function. \n",
"\n",
"The best known and most frequent usage of `forecast` enables forecasting on test sets that immediately follow the training data. \n",
"\n",
@@ -35,7 +35,6 @@
"Terminology:\n",
"* forecast origin: the last period when the target value is known\n",
"* forecast period(s): the period(s) for which the value of the target is desired.\n",
"* forecast horizon: the number of forecast periods\n",
"* lookback: how many past periods (before the forecast origin) the model function depends on. The larger of the number of lags and the length of the rolling window.\n",
"* prediction context: `lookback` periods immediately preceding the forecast origin\n",
"\n",
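A small illustration of the lookback definition above (the settings are made up, not taken from the notebook):

```python
# lookback is the larger of the number of lags and the rolling window length
lags = [1, 2, 3]          # model looks back up to 3 periods via lags
rolling_window_size = 5   # ...and up to 5 periods via the rolling window
lookback = max(max(lags), rolling_window_size)
print(lookback)  # 5 periods of prediction context are needed
```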
@@ -53,7 +52,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Please make sure you have followed the `configuration.ipynb` notebook so that your ML workspace information is saved in the config file."
"Please make sure you have followed the [configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) so that your ML workspace information is saved in the config file."
]
},
{
@@ -86,7 +85,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
]
},
{
@@ -95,7 +94,6 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.4.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -112,19 +110,20 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-forecast-function-demo'\n",
"experiment_name = \"automl-forecast-function-demo\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['SKU'] = ws.sku\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"SKU\"] = ws.sku\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -142,17 +141,20 @@
"metadata": {},
"outputs": [],
"source": [
"TIME_COLUMN_NAME = 'date'\n",
"GRAIN_COLUMN_NAME = 'grain'\n",
"TARGET_COLUMN_NAME = 'y'\n",
"TIME_COLUMN_NAME = \"date\"\n",
"TIME_SERIES_ID_COLUMN_NAME = \"time_series_id\"\n",
"TARGET_COLUMN_NAME = \"y\"\n",
"\n",
"def get_timeseries(train_len: int,\n",
"                   test_len: int,\n",
"                   time_column_name: str,\n",
"                   target_column_name: str,\n",
"                   grain_column_name: str,\n",
"                   grains: int = 1,\n",
"                   freq: str = 'H'):\n",
"\n",
"def get_timeseries(\n",
"    train_len: int,\n",
"    test_len: int,\n",
"    time_column_name: str,\n",
"    target_column_name: str,\n",
"    time_series_id_column_name: str,\n",
"    time_series_number: int = 1,\n",
"    freq: str = \"H\",\n",
"):\n",
"    \"\"\"\n",
"    Return the time series of designed length.\n",
"\n",
@@ -162,9 +164,8 @@
"    :type test_len: int\n",
"    :param time_column_name: The desired name of a time column.\n",
"    :type time_column_name: str\n",
"    :param\n",
"    :param grains: The number of grains.\n",
"    :type grains: int\n",
"    :param time_series_number: The number of time series in the data set.\n",
"    :type time_series_number: int\n",
"    :param freq: The frequency string representing pandas offset.\n",
"                 see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n",
"    :type freq: str\n",
@@ -175,15 +176,19 @@
"    data_train = []  # type: List[pd.DataFrame]\n",
"    data_test = []  # type: List[pd.DataFrame]\n",
"    data_length = train_len + test_len\n",
"    for i in range(grains):\n",
"        X = pd.DataFrame({\n",
"            time_column_name: pd.date_range(start='2000-01-01',\n",
"                                            periods=data_length,\n",
"                                            freq=freq),\n",
"            target_column_name: np.arange(data_length).astype(float) + np.random.rand(data_length) + i*5,\n",
"            'ext_predictor': np.asarray(range(42, 42 + data_length)),\n",
"            grain_column_name: np.repeat('g{}'.format(i), data_length)\n",
"        })\n",
"    for i in range(time_series_number):\n",
"        X = pd.DataFrame(\n",
"            {\n",
"                time_column_name: pd.date_range(\n",
"                    start=\"2000-01-01\", periods=data_length, freq=freq\n",
"                ),\n",
"                target_column_name: np.arange(data_length).astype(float)\n",
"                + np.random.rand(data_length)\n",
"                + i * 5,\n",
"                \"ext_predictor\": np.asarray(range(42, 42 + data_length)),\n",
"                time_series_id_column_name: np.repeat(\"ts{}\".format(i), data_length),\n",
"            }\n",
"        )\n",
"        data_train.append(X[:train_len])\n",
"        data_test.append(X[train_len:])\n",
"    X_train = pd.concat(data_train)\n",
@@ -192,14 +197,17 @@
"    y_test = X_test.pop(target_column_name).values\n",
"    return X_train, y_train, X_test, y_test\n",
"\n",
"\n",
"n_test_periods = 6\n",
"n_train_periods = 30\n",
"X_train, y_train, X_test, y_test = get_timeseries(train_len=n_train_periods,\n",
"                                                  test_len=n_test_periods,\n",
"                                                  time_column_name=TIME_COLUMN_NAME,\n",
"                                                  target_column_name=TARGET_COLUMN_NAME,\n",
"                                                  grain_column_name=GRAIN_COLUMN_NAME,\n",
"                                                  grains=2)"
"X_train, y_train, X_test, y_test = get_timeseries(\n",
"    train_len=n_train_periods,\n",
"    test_len=n_test_periods,\n",
"    time_column_name=TIME_COLUMN_NAME,\n",
"    target_column_name=TARGET_COLUMN_NAME,\n",
"    time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
"    time_series_number=2,\n",
")"
]
},
{
@@ -226,11 +234,12 @@
"source": [
"# plot the example time series\n",
"import matplotlib.pyplot as plt\n",
"\n",
"whole_data = X_train.copy()\n",
"target_label = 'y'\n",
"target_label = \"y\"\n",
"whole_data[target_label] = y_train\n",
"for g in whole_data.groupby('grain'):\n",
"    plt.plot(g[1]['date'].values, g[1]['y'].values, label=g[0])\n",
"for g in whole_data.groupby(\"time_series_id\"):\n",
"    plt.plot(g[1][\"date\"].values, g[1][\"y\"].values, label=g[0])\n",
"plt.legend()\n",
"plt.show()"
]
@@ -252,12 +261,12 @@
"# We need to save the artificial data and then upload them to the default workspace datastore.\n",
"DATA_PATH = \"fc_fn_data\"\n",
"DATA_PATH_X = \"{}/data_train.csv\".format(DATA_PATH)\n",
"if not os.path.isdir('data'):\n",
"    os.mkdir('data')\n",
"if not os.path.isdir(\"data\"):\n",
"    os.mkdir(\"data\")\n",
"pd.DataFrame(whole_data).to_csv(\"data/data_train.csv\", index=False)\n",
"# Upload saved data to the default data store.\n",
"ds = ws.get_default_datastore()\n",
"ds.upload(src_dir='./data', target_path=DATA_PATH, overwrite=True, show_progress=True)\n",
"ds.upload(src_dir=\"./data\", target_path=DATA_PATH, overwrite=True, show_progress=True)\n",
"train_data = Dataset.Tabular.from_delimited_files(path=ds.path(DATA_PATH_X))"
]
},
@@ -265,7 +274,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource."
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
@@ -283,10 +294,11 @@
"# Verify that cluster does not exist already\n",
"try:\n",
"    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
"    print('Found existing cluster, use it.')\n",
"    print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
"    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
"                                                           max_nodes=6)\n",
"    compute_config = AmlCompute.provisioning_configuration(\n",
"        vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
"    )\n",
"    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -298,13 +310,14 @@
"source": [
"## Create the configuration and train a forecaster <a id=\"train\"></a>\n",
"First generate the configuration, in which we:\n",
"* Set metadata columns: target, time column and grain column names.\n",
"* Set metadata columns: target, time column and time-series id column names.\n",
"* Validate our data using cross validation with rolling window method.\n",
"* Set normalized root mean squared error as a metric to select the best model.\n",
"* Set early termination to True, so the iterations through the models will stop when no further improvement in the accuracy score is made.\n",
"* Limit the length of the experiment run to 15 minutes.\n",
"* Finally, we set the task to be forecasting.\n",
"* We apply the lag lead operator to the target value, i.e. we use the previous values as a predictor for the future ones."
"* We apply the lag lead operator to the target value, i.e. we use the previous values as a predictor for the future ones.\n",
"* [Optional] Forecast frequency parameter (freq) represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information."
]
},
{
@@ -313,21 +326,25 @@
"metadata": {},
"outputs": [],
"source": [
"lags = [1, 2, 3]\n",
"max_horizon = n_test_periods\n",
"time_series_settings = {\n",
"    'time_column_name': TIME_COLUMN_NAME,\n",
"    'grain_column_names': [GRAIN_COLUMN_NAME],\n",
"    'max_horizon': max_horizon,\n",
"    'target_lags': lags\n",
"}"
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"lags = [1, 2, 3]\n",
"forecast_horizon = n_test_periods\n",
"forecasting_parameters = ForecastingParameters(\n",
"    time_column_name=TIME_COLUMN_NAME,\n",
"    forecast_horizon=forecast_horizon,\n",
"    time_series_id_column_names=[TIME_SERIES_ID_COLUMN_NAME],\n",
"    target_lags=lags,\n",
"    freq=\"H\",  # Set the forecast frequency to be hourly\n",
"    cv_step_size=\"auto\",\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Run the model selection and training process."
"Run the model selection and training process. Validation errors and current status will be shown when setting `show_output=True`, and the execution will be synchronous."
]
},
{
@@ -341,19 +358,21 @@
"from azureml.train.automl import AutoMLConfig\n",
"\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting',\n",
"                             debug_log='automl_forecasting_function.log',\n",
"                             primary_metric='normalized_root_mean_squared_error',\n",
"                             experiment_timeout_hours=0.25,\n",
"                             enable_early_stopping=True,\n",
"                             training_data=train_data,\n",
"                             compute_target=compute_target,\n",
"                             n_cross_validations=3,\n",
"                             verbosity=logging.INFO,\n",
"                             max_concurrent_iterations=4,\n",
"                             max_cores_per_iteration=-1,\n",
"                             label_column_name=target_label,\n",
"                             **time_series_settings)\n",
"automl_config = AutoMLConfig(\n",
"    task=\"forecasting\",\n",
"    debug_log=\"automl_forecasting_function.log\",\n",
"    primary_metric=\"normalized_root_mean_squared_error\",\n",
"    experiment_timeout_hours=0.25,\n",
"    enable_early_stopping=True,\n",
"    training_data=train_data,\n",
"    compute_target=compute_target,\n",
"    n_cross_validations=\"auto\",  # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
"    verbosity=logging.INFO,\n",
"    max_concurrent_iterations=4,\n",
"    max_cores_per_iteration=-1,\n",
"    label_column_name=target_label,\n",
"    forecasting_parameters=forecasting_parameters,\n",
")\n",
"\n",
"remote_run = experiment.submit(automl_config, show_output=False)"
]
@@ -430,7 +449,7 @@
"metadata": {},
"outputs": [],
"source": [
"y_pred_no_gap, xy_nogap = fitted_model.forecast(X_test)\n",
"y_pred_no_gap, xy_nogap = fitted_model.forecast(X_test)\n",
"\n",
"# xy_nogap contains the predictions in the _automl_target_col column.\n",
"# Those same numbers are output in y_pred_no_gap\n",
@@ -458,7 +477,7 @@
"metadata": {},
"outputs": [],
"source": [
"quantiles = fitted_model.forecast_quantiles(X_test)\n",
"quantiles = fitted_model.forecast_quantiles(X_test)\n",
"quantiles"
]
},
@@ -478,12 +497,12 @@
"metadata": {},
"outputs": [],
"source": [
"# specify which quantiles you would like \n",
"# specify which quantiles you would like\n",
"fitted_model.quantiles = [0.01, 0.5, 0.95]\n",
"# use forecast_quantiles function, not the forecast() one\n",
"y_pred_quantiles = fitted_model.forecast_quantiles(X_test)\n",
"y_pred_quantiles = fitted_model.forecast_quantiles(X_test)\n",
"\n",
"# quantile forecasts returned in a Dataframe along with the time and grain columns \n",
"# quantile forecasts returned in a Dataframe along with the time and time series id columns\n",
"y_pred_quantiles"
]
},
@@ -493,7 +512,7 @@
"source": [
"#### Destination-date forecast: \"just do something\"\n",
"\n",
"In some scenarios, the X_test is not known. The forecast is likely to be weak, because it is missing contemporaneous predictors, which we will need to impute. If you still wish to predict forward under the assumption that the last known values will be carried forward, you can forecast out to \"destination date\". The destination date still needs to fit within the maximum horizon from training."
"In some scenarios, the X_test is not known. The forecast is likely to be weak, because it is missing contemporaneous predictors, which we will need to impute. If you still wish to predict forward under the assumption that the last known values will be carried forward, you can forecast out to \"destination date\". The destination date still needs to fit within the forecast horizon from training."
]
},
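For reference, a destination-date call might look like the sketch below; it assumes the fitted model's `forecast()` accepts a `forecast_destination` timestamp as in the AutoML SDK's forecasting interface, and the date itself is illustrative:

```python
import pandas as pd

# No X_test is supplied; feature values are imputed and the model predicts
# from the end of training out to the destination date, which must lie
# within the forecast horizon set at training time.
dest = pd.Timestamp("2000-01-02 11:00:00")  # illustrative destination
y_dest, xy_dest = fitted_model.forecast(forecast_destination=dest)
```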
{
@@ -520,7 +539,7 @@
"\n",
"\n",
"\n",
"The notion of forecast origin comes into play: the forecast origin is **the last period for which we have seen the target value**. This applies per grain, so each grain can have a different forecast origin. \n",
"The notion of forecast origin comes into play: the forecast origin is **the last period for which we have seen the target value**. This applies per time-series, so each time-series can have a different forecast origin. \n",
"\n",
"The part of data before the forecast origin is the **prediction context**. To provide the context values the model needs when it looks back, we pass definite values in `y_test` (aligned with corresponding times in `X_test`)."
]
@@ -531,19 +550,21 @@
"metadata": {},
"outputs": [],
"source": [
"# generate the same kind of test data we trained on,\n",
"# but now make the train set much longer, so that the test set will be in the future\n",
"X_context, y_context, X_away, y_away = get_timeseries(train_len=42,  # train data was 30 steps long\n",
"                                                      test_len=4,\n",
"                                                      time_column_name=TIME_COLUMN_NAME,\n",
"                                                      target_column_name=TARGET_COLUMN_NAME,\n",
"                                                      grain_column_name=GRAIN_COLUMN_NAME,\n",
"                                                      grains=2)\n",
"X_context, y_context, X_away, y_away = get_timeseries(\n",
"    train_len=42,  # train data was 30 steps long\n",
"    test_len=4,\n",
"    time_column_name=TIME_COLUMN_NAME,\n",
"    target_column_name=TARGET_COLUMN_NAME,\n",
"    time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
"    time_series_number=2,\n",
")\n",
"\n",
"# end of the data we trained on\n",
"print(X_train.groupby(GRAIN_COLUMN_NAME)[TIME_COLUMN_NAME].max())\n",
"print(X_train.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())\n",
"# start of the data we want to predict on\n",
"print(X_away.groupby(GRAIN_COLUMN_NAME)[TIME_COLUMN_NAME].min())"
"print(X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].min())"
]
},
{
@@ -559,7 +580,7 @@
"metadata": {},
"outputs": [],
"source": [
"try: \n",
"try:\n",
"    y_pred_away, xy_away = fitted_model.forecast(X_away)\n",
"    xy_away\n",
"except Exception as e:\n",
@@ -570,7 +591,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"How should we read that error message? The forecast origin is at the last time the model saw an actual value of `y` (the target). That was at the end of the training data! The model is attempting to forecast from the end of training data. But the requested forecast periods are past the maximum horizon. We need to provide a definite `y` value to establish the forecast origin.\n",
"How should we read that error message? The forecast origin is at the last time the model saw an actual value of `y` (the target). That was at the end of the training data! The model is attempting to forecast from the end of training data. But the requested forecast periods are past the forecast horizon. We need to provide a definite `y` value to establish the forecast origin.\n",
"\n",
"We will use this helper function to take the required amount of context from the data preceding the testing data. Its definition is intentionally simplified to keep the idea clear."
]
@@ -581,32 +602,34 @@
"metadata": {},
"outputs": [],
"source": [
"def make_forecasting_query(fulldata, time_column_name, target_column_name, forecast_origin, horizon, lookback):\n",
"def make_forecasting_query(\n",
"    fulldata, time_column_name, target_column_name, forecast_origin, horizon, lookback\n",
"):\n",
"\n",
"    \"\"\"\n",
"    This function will take the full dataset, and create the query\n",
"    to predict all values of the grain from the `forecast_origin`\n",
"    to predict all values of the time series from the `forecast_origin`\n",
"    forward for the next `horizon` horizons. Context from previous\n",
"    `lookback` periods will be included.\n",
"\n",
"\n",
"\n",
"    fulldata: pandas.DataFrame      a time series dataset. Needs to contain X and y.\n",
"    time_column_name: string        which column (must be in fulldata) is the time axis\n",
"    target_column_name: string      which column (must be in fulldata) is to be forecast\n",
"    forecast_origin: datetime type  the last time we (pretend to) have target values \n",
"    forecast_origin: datetime type  the last time we (pretend to) have target values\n",
"    horizon: timedelta              how far forward, in time units (not periods)\n",
"    lookback: timedelta             how far back does the model look?\n",
"    lookback: timedelta             how far back does the model look\n",
"\n",
"    Example:\n",
"\n",
"\n",
"    ```\n",
"\n",
"    forecast_origin = pd.to_datetime('2012-09-01') + pd.DateOffset(days=5) # forecast 5 days after end of training\n",
"    forecast_origin = pd.to_datetime(\"2012-09-01\") + pd.DateOffset(days=5) # forecast 5 days after end of training\n",
"    print(forecast_origin)\n",
"\n",
"    X_query, y_query = make_forecasting_query(data,\n",
"    X_query, y_query = make_forecasting_query(data,\n",
"                       forecast_origin = forecast_origin,\n",
"                       horizon = pd.DateOffset(days=7), # 7 days into the future\n",
"                       lookback = pd.DateOffset(days=1), # model has lag 1 period (day)\n",
@@ -615,28 +638,28 @@
"    ```\n",
"    \"\"\"\n",
"\n",
"    X_past = fulldata[(fulldata[time_column_name] > forecast_origin - lookback) &\n",
"                      (fulldata[time_column_name] <= forecast_origin)\n",
"                      ]\n",
"    X_past = fulldata[\n",
"        (fulldata[time_column_name] > forecast_origin - lookback)\n",
"        & (fulldata[time_column_name] <= forecast_origin)\n",
"    ]\n",
"\n",
"    X_future = fulldata[(fulldata[time_column_name] > forecast_origin) &\n",
"                        (fulldata[time_column_name] <= forecast_origin + horizon)\n",
"                        ]\n",
"    X_future = fulldata[\n",
"        (fulldata[time_column_name] > forecast_origin)\n",
"        & (fulldata[time_column_name] <= forecast_origin + horizon)\n",
"    ]\n",
"\n",
"    y_past = X_past.pop(target_column_name).values.astype(np.float)\n",
"    y_future = X_future.pop(target_column_name).values.astype(np.float)\n",
"    y_past = X_past.pop(target_column_name).values.astype(float)\n",
"    y_future = X_future.pop(target_column_name).values.astype(float)\n",
"\n",
"    # Now take y_future and turn it into question marks\n",
"    y_query = y_future.copy().astype(np.float)  # because sometimes life hands you an int\n",
"    y_query = y_future.copy().astype(float)  # because sometimes life hands you an int\n",
"    y_query.fill(np.NaN)\n",
"\n",
"\n",
"    print(\"X_past is \" + str(X_past.shape) + \" - shaped\")\n",
"    print(\"X_future is \" + str(X_future.shape) + \" - shaped\")\n",
"    print(\"y_past is \" + str(y_past.shape) + \" - shaped\")\n",
"    print(\"y_query is \" + str(y_query.shape) + \" - shaped\")\n",
"\n",
"\n",
"    X_pred = pd.concat([X_past, X_future])\n",
"    y_pred = np.concatenate([y_past, y_query])\n",
"    return X_pred, y_pred"
@@ -655,8 +678,16 @@
"metadata": {},
"outputs": [],
"source": [
"print(X_context.groupby(GRAIN_COLUMN_NAME)[TIME_COLUMN_NAME].agg(['min','max','count']))\n",
"print(X_away.groupby(GRAIN_COLUMN_NAME)[TIME_COLUMN_NAME].agg(['min','max','count']))\n",
"print(\n",
"    X_context.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(\n",
"        [\"min\", \"max\", \"count\"]\n",
"    )\n",
")\n",
"print(\n",
"    X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(\n",
"        [\"min\", \"max\", \"count\"]\n",
"    )\n",
")\n",
"X_context.tail(5)"
]
},
@@ -666,11 +697,11 @@
"metadata": {},
"outputs": [],
"source": [
"# Since the length of the lookback is 3,\n",
"# we need to add 3 periods from the context to the request\n",
"# so that the model has the data it needs\n",
"\n",
"# Put the X and y back together for a while.\n",
"# They like each other and it makes them happy.\n",
"X_context[TARGET_COLUMN_NAME] = y_context\n",
"X_away[TARGET_COLUMN_NAME] = y_away\n",
@@ -681,16 +712,17 @@
"# it is indeed the last point of the context\n",
"assert forecast_origin == X_context[TIME_COLUMN_NAME].max()\n",
"print(\"Forecast origin: \" + str(forecast_origin))\n",
"\n",
"# the model uses lags and rolling windows to look back in time\n",
"n_lookback_periods = max(lags)\n",
"lookback = pd.DateOffset(hours=n_lookback_periods)\n",
"\n",
"horizon = pd.DateOffset(hours=max_horizon)\n",
"horizon = pd.DateOffset(hours=forecast_horizon)\n",
"\n",
"# now make the forecast query from context (refer to figure)\n",
"X_pred, y_pred = make_forecasting_query(fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME,\n",
"                                        forecast_origin, horizon, lookback)\n",
"X_pred, y_pred = make_forecasting_query(\n",
"    fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME, forecast_origin, horizon, lookback\n",
")\n",
"\n",
"# show the forecast request aligned\n",
"X_show = X_pred.copy()\n",
@@ -702,7 +734,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Note that the forecast origin is at 17:00 for both grains, and periods from 18:00 are to be forecast."
"Note that the forecast origin is at 17:00 for both time-series, and periods from 18:00 are to be forecast."
]
},
{
@@ -717,15 +749,141 @@
"# show the forecast aligned\n",
"X_show = xy_away.reset_index()\n",
"# without the generated features\n",
"X_show[['date', 'grain', 'ext_predictor', '_automl_target_col']]\n",
"X_show[[\"date\", \"time_series_id\", \"ext_predictor\", \"_automl_target_col\"]]\n",
"# prediction is in _automl_target_col"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Forecasting farther than the forecast horizon <a id=\"recursive forecasting\"></a>\n",
"When the forecast destination, or the latest date in the prediction data frame, is farther into the future than the specified forecast horizon, the forecaster must be iteratively applied. Here, we advance the forecast origin on each iteration over the prediction window, predicting `max_horizon` periods ahead on each iteration. There are two choices for the context data to use as the forecaster advances into the prediction window:\n",
"\n",
"1. We can use forecasted values from previous iterations (recursive forecast),\n",
"2. We can use known, actual values of the target if they are available (rolling forecast).\n",
"\n",
"The first method is useful in a true forecasting scenario when we do not yet know the actual target values while the second is useful in an evaluation scenario where we want to compute accuracy metrics for the `max_horizon`-period-ahead forecaster over a long test set. We refer to the first as a **recursive forecast** since we apply the forecaster recursively over the prediction window and the second as a **rolling forecast** since we roll forward over known actuals.\n",
"\n",
"### Recursive forecasting\n",
"By default, the `forecast()` function will make point predictions out to the later date using a recursive operation mode. Internally, the method recursively applies the regular forecaster to generate context so that we can forecast further into the future. \n",
"\n",
"To illustrate the use-case and operation of recursive forecasting, we'll consider an example with a single time-series where the forecasting period directly follows the training period and is twice as long as the forecasting horizon given at training time.\n",
"\n",
"\n",
"\n",
"Internally, we apply the forecaster in an iterative manner and finish the forecast task in two iterations. In the first iteration, we apply the forecaster and get the prediction for the first forecast-horizon periods (y_pred1). In the second iteration, y_pred1 is used as the context to produce the prediction for the next forecast-horizon periods (y_pred2). The combination of y_pred1 and y_pred2 gives the results for the total forecast periods. \n",
"\n",
"A caveat: forecast accuracy will likely be worse the farther we predict into the future since errors are compounded with recursive application of the forecaster.\n",
"\n",
"\n",
""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# generate the same kind of test data we trained on, but with a single time-series and test period twice as long\n",
"# as the forecast_horizon.\n",
"_, _, X_test_long, y_test_long = get_timeseries(\n",
"    train_len=n_train_periods,\n",
"    test_len=forecast_horizon * 2,\n",
"    time_column_name=TIME_COLUMN_NAME,\n",
"    target_column_name=TARGET_COLUMN_NAME,\n",
"    time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
"    time_series_number=1,\n",
")\n",
"\n",
"print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].min())\n",
"print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# forecast() function will invoke the recursive forecast method internally.\n",
"y_pred_long, X_trans_long = fitted_model.forecast(X_test_long)\n",
"y_pred_long"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# What forecast() function does in this case is equivalent to iterating it twice over the test set as the following.\n",
"y_pred1, _ = fitted_model.forecast(X_test_long[:forecast_horizon])\n",
"y_pred_all, _ = fitted_model.forecast(\n",
"    X_test_long, np.concatenate((y_pred1, np.full(forecast_horizon, np.nan)))\n",
")\n",
"np.array_equal(y_pred_all, y_pred_long)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Rolling forecasts\n",
"A rolling forecast is a similar concept to the recursive forecasts described above except that we use known actual values of the target for our context data. We have provided a different, public method for this called `rolling_forecast`. In addition to test data and actuals (`X_test` and `y_test`), `rolling_forecast` also accepts an optional `step` parameter that controls how far the origin advances on each iteration. The recursive forecast mode uses a fixed step of `max_horizon` while `rolling_forecast` defaults to a step size of 1, but can be set to any integer from 1 to `max_horizon`, inclusive.\n",
"\n",
"Let's see what the rolling forecast looks like on the long test set with the step set to 1:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_rf = fitted_model.rolling_forecast(X_test_long, y_test_long, step=1)\n",
"X_rf.head(n=12)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Notice that `rolling_forecast` has returned a single DataFrame containing all results and has generated some new columns: `_automl_forecast_origin`, `_automl_forecast_y`, and `_automl_actual_y`. These are the origin date for each forecast, the forecasted value and the actual value, respectively. Note that \"y\" in the forecast and actual column names will generally be replaced by the target column name supplied to AutoML.\n",
"\n",
"The output above shows forecasts for two prediction windows, the first with origin at the end of the training set and the second including the first observation in the test set (2000-01-01 06:00:00). Since the forecast windows overlap, there are multiple forecasts for most dates which are associated with different origin dates."
]
},
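Since the stated purpose of a rolling forecast is evaluation, one natural follow-up is to score each origin's window separately. This is a sketch using the column names described above; `_automl_actual_y` and `_automl_forecast_y` assume the target column was named `y`:

```python
import numpy as np

# Absolute percentage error per row of the rolling forecast output...
ape = 100 * np.abs(
    (X_rf["_automl_actual_y"] - X_rf["_automl_forecast_y"]) / X_rf["_automl_actual_y"]
)
# ...then the mean APE over each forecast origin's window.
print(ape.groupby(X_rf["_automl_forecast_origin"]).mean())
```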
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Confidence interval and distributional forecasts\n",
"AutoML cannot currently estimate forecast errors beyond the forecast horizon set during training, so the `forecast_quantiles()` function will return missing values for quantiles not equal to 0.5 beyond the forecast horizon. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fitted_model.forecast_quantiles(X_test_long)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Similarly to the simple scenarios illustrated above, forecasting farther than the forecast horizon in other scenarios like 'multiple time-series', 'Destination-date forecast', and 'forecast away from the training data' is also automatically handled by the `forecast()` function. "
]
}
],
"metadata": {
"authors": [
{
"name": "erwright"
"name": "jialiu"
}
],
"category": "tutorial",
@@ -745,9 +903,9 @@
"friendly_name": "Forecasting away from training data",
"index_order": 3,
"kernelspec": {
"display_name": "Python 3.6",
"display_name": "Python 3.8 - AzureML",
"language": "python",
"name": "python36"
"name": "python38-azureml"
},
"language_info": {
"codemirror_mode": {
@@ -759,14 +917,19 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
"version": "3.7.13"
},
"tags": [
"Forecasting",
"Confidence Intervals"
],
"task": "Forecasting"
"task": "Forecasting",
"vscode": {
"interpreter": {
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}
After Width: | Height: | Size: 69 KiB |
After Width: | Height: | Size: 65 KiB |
After Width: | Height: | Size: 28 KiB |
After Width: | Height: | Size: 61 KiB |
After Width: | Height: | Size: 25 KiB |
@@ -19,7 +19,14 @@
"hidePrompt": false
},
"source": [
""
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<font color=\"red\" size=\"5\"><strong>!Important!</strong> </br>This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-github-dau)).</font>"
]
},
{
@@ -30,7 +37,7 @@
},
"source": [
"# Automated Machine Learning\n",
"**Beer Production Forecasting**\n",
"**Github DAU Forecasting**\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
@@ -48,17 +55,16 @@
},
"source": [
"## Introduction\n",
"This notebook demonstrates demand forecasting for Beer Production Dataset using AutoML.\n",
"This notebook demonstrates demand forecasting for Github Daily Active Users Dataset using AutoML.\n",
"\n",
"AutoML highlights here include using Deep Learning forecasts, Arima, Prophet, Remote Execution and Remote Inferencing, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"An Enterprise workspace is required for this notebook. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page.](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade)\n",
"Make sure you have executed the [configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) before running this notebook.\n",
"\n",
"Notebook synopsis:\n",
"\n",
"1. Creating an Experiment in an existing Workspace\n",
"2. Configuration and remote run of AutoML for a time-series model exploring Regression learners, Arima, Prophet and DNNs\n",
"2. Configuration and remote run of AutoML for a time-series model exploring DNNs\n",
"3. Evaluating the fitted model using a rolling test "
]
},
@@ -93,8 +99,7 @@
"# Squash warning messages for cleaner output in the notebook\n",
"warnings.showwarning = lambda *args, **kwargs: None\n",
"\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core import Workspace, Experiment, Dataset\n",
"from azureml.train.automl import AutoMLConfig\n",
"from matplotlib import pyplot as plt\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
@@ -105,7 +110,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
]
},
{
@@ -114,7 +119,6 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.4.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -140,18 +144,19 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'beer-remote-cpu'\n",
"experiment_name = \"github-remote-cpu\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -163,7 +168,9 @@
},
"source": [
"### Using AmlCompute\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource."
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
@@ -179,15 +186,16 @@
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"cpu_cluster_name = \"beer-cluster\"\n",
"cpu_cluster_name = \"github-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
"    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
"    print('Found existing cluster, use it.')\n",
"    print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
"    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
"                                                           max_nodes=4)\n",
"    compute_config = AmlCompute.provisioning_configuration(\n",
"        vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
"    )\n",
"    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -201,7 +209,7 @@
},
"source": [
"## Data\n",
"Read Beer demand data from file, and preview data."
"Read Github DAU data from file, and preview data."
]
},
{
@@ -217,7 +225,9 @@
"\n",
"**Time column** is the time axis along which to predict.\n",
"\n",
"**Grain** is another word for an individual time series in your dataset. Grains are identified by values of the columns listed `grain_column_names`, for example \"store\" and \"item\" if your data has multiple time series of sales, one series for each combination of store and item sold.\n",
"**Time series identifier columns** are identified by values of the columns listed `time_series_id_column_names`, for example \"store\" and \"item\" if your data has multiple time series of sales, one series for each combination of store and item sold.\n",
"\n",
"**Forecast frequency (freq)** This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.\n",
"\n",
"This dataset has only one time series. Please see the [orange juice notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales) for an example of a multi-time series dataset."
]
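For reference, a few pandas offset aliases that are valid `freq` values (a sketch; see the pandas documentation linked above for the full list):

```python
import pandas as pd

# "D" = calendar day, "W" = weekly, "M" = month end, "H" = hourly
for alias in ["D", "W", "M", "H"]:
    print(alias, "->", pd.tseries.frequencies.to_offset(alias))
```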
@@ -242,17 +252,19 @@
|
||||
"plt.tight_layout()\n",
|
||||
"\n",
|
||||
"plt.subplot(2, 1, 1)\n",
|
||||
"plt.title('Beer Production By Year')\n",
|
||||
"df = pd.read_csv(\"Beer_no_valid_split_train.csv\", parse_dates=True, index_col= 'DATE').drop(columns='grain')\n",
|
||||
"test_df = pd.read_csv(\"Beer_no_valid_split_test.csv\", parse_dates=True, index_col= 'DATE').drop(columns='grain')\n",
|
||||
"plt.title(\"Github Daily Active User By Year\")\n",
|
||||
"df = pd.read_csv(\"github_dau_2011-2018_train.csv\", parse_dates=True, index_col=\"date\")\n",
|
||||
"test_df = pd.read_csv(\n",
|
||||
" \"github_dau_2011-2018_test.csv\", parse_dates=True, index_col=\"date\"\n",
|
||||
")\n",
|
||||
"plt.plot(df)\n",
|
||||
"\n",
|
||||
"plt.subplot(2, 1, 2)\n",
|
||||
"plt.title('Beer Production By Month')\n",
|
||||
"plt.title(\"Github Daily Active User By Month\")\n",
|
||||
"groups = df.groupby(df.index.month)\n",
|
||||
"months = concat([DataFrame(x[1].values) for x in groups], axis=1)\n",
|
||||
"months = DataFrame(months)\n",
|
||||
"months.columns = range(1,13)\n",
|
||||
"months.columns = range(1, 49)\n",
|
||||
"months.boxplot()\n",
|
||||
"\n",
|
||||
"plt.show()"
|
||||
@@ -267,10 +279,10 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"target_column_name = 'BeerProduction'\n",
|
||||
"time_column_name = 'DATE'\n",
|
||||
"grain_column_names = []\n",
|
||||
"freq = 'M' #Monthly data"
|
||||
"target_column_name = \"count\"\n",
|
||||
"time_column_name = \"date\"\n",
|
||||
"time_series_id_column_names = []\n",
|
||||
"freq = \"D\" # Daily data"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -293,19 +305,22 @@
|
||||
"from helper import split_full_for_forecasting\n",
|
||||
"\n",
|
||||
"train, valid = split_full_for_forecasting(df, time_column_name)\n",
|
||||
"train.to_csv(\"train.csv\")\n",
|
||||
"valid.to_csv(\"valid.csv\")\n",
|
||||
"test_df.to_csv(\"test.csv\")\n",
|
||||
"\n",
|
||||
"# Reset index to create a Tabualr Dataset.\n",
|
||||
"train.reset_index(inplace=True)\n",
|
||||
"valid.reset_index(inplace=True)\n",
|
||||
"test_df.reset_index(inplace=True)\n",
|
||||
"\n",
|
||||
"datastore = ws.get_default_datastore()\n",
|
||||
"datastore.upload_files(files = ['./train.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
|
||||
"datastore.upload_files(files = ['./valid.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
|
||||
"datastore.upload_files(files = ['./test.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
|
||||
"\n",
|
||||
"from azureml.core import Dataset\n",
|
||||
"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/train.csv')])\n",
|
||||
"valid_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/valid.csv')])\n",
|
||||
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])"
|
||||
"train_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
|
||||
" train, target=(datastore, \"dataset/\"), name=\"Github_DAU_train\"\n",
|
||||
")\n",
|
||||
"valid_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
|
||||
" valid, target=(datastore, \"dataset/\"), name=\"Github_DAU_valid\"\n",
|
||||
")\n",
|
||||
"test_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
|
||||
" test_df, target=(datastore, \"dataset/\"), name=\"Github_DAU_test\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -317,7 +332,7 @@
|
||||
"source": [
|
||||
"### Setting forecaster maximum horizon \n",
|
||||
"\n",
|
||||
"The forecast horizon is the number of periods into the future that the model should predict. Here, we set the horizon to 12 periods (i.e. 12 months). Notice that this is much shorter than the number of months in the test set; we will need to use a rolling test to evaluate the performance on the whole test set. For more discussion of forecast horizons and guiding principles for setting them, please see the [energy demand notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand). "
|
||||
"The forecast horizon is the number of periods into the future that the model should predict. Here, we set the horizon to 14 periods (i.e. 14 days). Notice that this is much shorter than the number of months in the test set; we will need to use a rolling test to evaluate the performance on the whole test set. For more discussion of forecast horizons and guiding principles for setting them, please see the [energy demand notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand). "
|
||||
]
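A back-of-the-envelope sketch of why a rolling test is needed (the test-set length below is an assumed placeholder, not read from the files):

```python
forecast_horizon = 14  # periods (days) the model predicts per origin
test_set_length = 455  # assumed number of daily rows held out for testing

# Rolling evaluation advances the forecast origin one step at a time, so a
# 14-day horizon can still score every date in a much longer test window.
n_origins = test_set_length - forecast_horizon + 1
print(f"{n_origins} rolling origins cover the {test_set_length}-day test set")
```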
|
||||
},
|
||||
{
|
||||
@@ -329,7 +344,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"max_horizon = 12"
|
||||
"forecast_horizon = 14"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -350,11 +365,7 @@
|
||||
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
|
||||
"|**training_data**|Input dataset, containing both features and label column.|\n",
|
||||
"|**label_column_name**|The name of the label column.|\n",
|
||||
"|**enable_dnn**|Enable Forecasting DNNs|\n",
|
||||
"\n",
|
||||
"This notebook uses the blacklist_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blacklist_models list but you may need to increase the iteration_timeout_minutes parameter value to get results.\n",
|
||||
"\n",
|
||||
"This step requires an Enterprise workspace to gain access to this feature. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page.](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade)."
|
||||
"|**enable_dnn**|Enable Forecasting DNNs|\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -366,23 +377,30 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_settings = {\n",
|
||||
" 'time_column_name': time_column_name,\n",
|
||||
" 'max_horizon': max_horizon,\n",
|
||||
" 'enable_dnn' : True,\n",
|
||||
"}\n",
|
||||
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task='forecasting', \n",
|
||||
" primary_metric='normalized_root_mean_squared_error',\n",
|
||||
" experiment_timeout_hours = 1,\n",
|
||||
" training_data=train_dataset,\n",
|
||||
" label_column_name=target_column_name,\n",
|
||||
" validation_data=valid_dataset, \n",
|
||||
" verbosity=logging.INFO,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" max_concurrent_iterations=4,\n",
|
||||
" max_cores_per_iteration=-1,\n",
|
||||
" **automl_settings)"
|
||||
"forecasting_parameters = ForecastingParameters(\n",
|
||||
" time_column_name=time_column_name,\n",
|
||||
" forecast_horizon=forecast_horizon,\n",
|
||||
" freq=\"D\", # Set the forecast frequency to be daily\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# To only allow the TCNForecaster we set the allowed_models parameter to reflect this.\n",
|
||||
"automl_config = AutoMLConfig(\n",
|
||||
" task=\"forecasting\",\n",
|
||||
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||
" experiment_timeout_hours=1.5,\n",
|
||||
" training_data=train_dataset,\n",
|
||||
" label_column_name=target_column_name,\n",
|
||||
" validation_data=valid_dataset,\n",
|
||||
" verbosity=logging.INFO,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" max_concurrent_iterations=4,\n",
|
||||
" max_cores_per_iteration=-1,\n",
|
||||
" enable_dnn=True,\n",
|
||||
" allowed_models=[\"TCNForecaster\"],\n",
|
||||
" forecasting_parameters=forecasting_parameters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -392,7 +410,7 @@
|
||||
"hidePrompt": false
|
||||
},
|
||||
"source": [
|
||||
"We will now run the experiment, starting with 10 iterations of model search. The experiment can be continued for more iterations if more accurate results are required."
|
||||
"We will now run the experiment, starting with 10 iterations of model search. The experiment can be continued for more iterations if more accurate results are required. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -404,8 +422,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run = experiment.submit(automl_config, show_output= False)\n",
|
||||
"remote_run"
|
||||
"remote_run = experiment.submit(automl_config, show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -422,15 +439,6 @@
|
||||
"# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run.wait_for_completion()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
@@ -462,6 +470,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from helper import get_result_df\n",
|
||||
"\n",
|
||||
"summary_df = get_result_df(remote_run)\n",
|
||||
"summary_df"
|
||||
]
|
||||
@@ -477,11 +486,14 @@
|
||||
"source": [
|
||||
"from azureml.core.run import Run\n",
|
||||
"from azureml.widgets import RunDetails\n",
|
||||
"forecast_model = 'TCNForecaster'\n",
|
||||
"if not forecast_model in summary_df['run_id']:\n",
|
||||
" forecast_model = 'ForecastTCN'\n",
|
||||
" \n",
|
||||
"best_dnn_run_id = summary_df['run_id'][forecast_model]\n",
|
||||
"\n",
|
||||
"forecast_model = \"TCNForecaster\"\n",
|
||||
"if not forecast_model in summary_df[\"run_id\"]:\n",
|
||||
" forecast_model = \"ForecastTCN\"\n",
|
||||
"\n",
|
||||
"best_dnn_run_id = summary_df[summary_df[\"Score\"] == summary_df[\"Score\"].min()][\n",
|
||||
" \"run_id\"\n",
|
||||
"][forecast_model]\n",
|
||||
"best_dnn_run = Run(experiment, best_dnn_run_id)"
|
||||
]
|
||||
},
|
||||
@@ -495,7 +507,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"best_dnn_run.parent\n",
|
||||
"RunDetails(best_dnn_run.parent).show() "
|
||||
"RunDetails(best_dnn_run.parent).show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -508,7 +520,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"best_dnn_run\n",
|
||||
"RunDetails(best_dnn_run).show() "
|
||||
"RunDetails(best_dnn_run).show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -542,8 +554,6 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Dataset\n",
|
||||
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])\n",
|
||||
"# preview the first 3 rows of the dataset\n",
|
||||
"test_dataset.take(5).to_pandas_dataframe()"
|
||||
]
|
||||
@@ -554,7 +564,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"compute_target = ws.compute_targets['beer-cluster']\n",
|
||||
"compute_target = ws.compute_targets[\"github-cluster\"]\n",
|
||||
"test_experiment = Experiment(ws, experiment_name + \"_test\")"
|
||||
]
|
||||
},
|
||||
@@ -570,9 +580,9 @@
|
||||
"import os\n",
|
||||
"import shutil\n",
|
||||
"\n",
|
||||
"script_folder = os.path.join(os.getcwd(), 'inference')\n",
|
||||
"script_folder = os.path.join(os.getcwd(), \"inference\")\n",
|
||||
"os.makedirs(script_folder, exist_ok=True)\n",
|
||||
"shutil.copy('infer.py', script_folder)"
|
||||
"shutil.copy(\"infer.py\", script_folder)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -583,8 +593,18 @@
|
||||
"source": [
|
||||
"from helper import run_inference\n",
|
||||
"\n",
|
||||
"test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run, test_dataset, valid_dataset, max_horizon,\n",
|
||||
" target_column_name, time_column_name, freq)"
|
||||
"test_run = run_inference(\n",
|
||||
" test_experiment,\n",
|
||||
" compute_target,\n",
|
||||
" script_folder,\n",
|
||||
" best_dnn_run,\n",
|
||||
" test_dataset,\n",
|
||||
" valid_dataset,\n",
|
||||
" forecast_horizon,\n",
|
||||
" target_column_name,\n",
|
||||
" time_column_name,\n",
|
||||
" freq,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -604,8 +624,19 @@
|
||||
"source": [
|
||||
"from helper import run_multiple_inferences\n",
|
||||
"\n",
|
||||
"summary_df = run_multiple_inferences(summary_df, experiment, test_experiment, compute_target, script_folder, test_dataset, \n",
|
||||
" valid_dataset, max_horizon, target_column_name, time_column_name, freq)"
|
||||
"summary_df = run_multiple_inferences(\n",
|
||||
" summary_df,\n",
|
||||
" experiment,\n",
|
||||
" test_experiment,\n",
|
||||
" compute_target,\n",
|
||||
" script_folder,\n",
|
||||
" test_dataset,\n",
|
||||
" valid_dataset,\n",
|
||||
" forecast_horizon,\n",
|
||||
" target_column_name,\n",
|
||||
" time_column_name,\n",
|
||||
" freq,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -625,7 +656,7 @@
|
||||
" test_run = Run(test_experiment, test_run_id)\n",
|
||||
" test_run.wait_for_completion()\n",
|
||||
" test_score = test_run.get_metrics()[run_summary.primary_metric]\n",
|
||||
" summary_df.loc[summary_df.run_id == run_id, 'Test Score'] = test_score\n",
|
||||
" summary_df.loc[summary_df.run_id == run_id, \"Test Score\"] = test_score\n",
|
||||
" print(\"Test Score: \", test_score)"
|
||||
]
|
||||
},
|
||||
@@ -652,14 +683,14 @@
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "omkarm"
|
||||
"name": "jialiu"
|
||||
}
|
||||
],
|
||||
"hide_code_all_hidden": false,
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"display_name": "Python 3.8 - AzureML",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
"name": "python38-azureml"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -671,9 +702,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.7"
|
||||
"version": "3.8.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -0,0 +1,455 @@
|
||||
date,count,day_of_week,month_of_year,holiday
|
||||
2017-06-04,104663,6.0,5.0,0.0
|
||||
2017-06-05,155824,0.0,5.0,0.0
|
||||
2017-06-06,164908,1.0,5.0,0.0
|
||||
2017-06-07,170309,2.0,5.0,0.0
|
||||
2017-06-08,164256,3.0,5.0,0.0
|
||||
2017-06-09,153406,4.0,5.0,0.0
|
||||
2017-06-10,97024,5.0,5.0,0.0
|
||||
2017-06-11,103442,6.0,5.0,0.0
|
||||
2017-06-12,160768,0.0,5.0,0.0
|
||||
2017-06-13,166288,1.0,5.0,0.0
|
||||
2017-06-14,163819,2.0,5.0,0.0
|
||||
2017-06-15,157593,3.0,5.0,0.0
|
||||
2017-06-16,149259,4.0,5.0,0.0
|
||||
2017-06-17,95579,5.0,5.0,0.0
|
||||
2017-06-18,98723,6.0,5.0,0.0
|
||||
2017-06-19,159076,0.0,5.0,0.0
|
||||
2017-06-20,163340,1.0,5.0,0.0
|
||||
2017-06-21,163344,2.0,5.0,0.0
|
||||
2017-06-22,159528,3.0,5.0,0.0
|
||||
2017-06-23,146563,4.0,5.0,0.0
|
||||
2017-06-24,92631,5.0,5.0,0.0
|
||||
2017-06-25,96549,6.0,5.0,0.0
|
||||
2017-06-26,153249,0.0,5.0,0.0
|
||||
2017-06-27,160357,1.0,5.0,0.0
|
||||
2017-06-28,159941,2.0,5.0,0.0
|
||||
2017-06-29,156781,3.0,5.0,0.0
|
||||
2017-06-30,144709,4.0,5.0,0.0
|
||||
2017-07-01,89101,5.0,6.0,0.0
|
||||
2017-07-02,93046,6.0,6.0,0.0
|
||||
2017-07-03,144113,0.0,6.0,0.0
|
||||
2017-07-04,143061,1.0,6.0,1.0
|
||||
2017-07-05,154603,2.0,6.0,0.0
|
||||
2017-07-06,157200,3.0,6.0,0.0
|
||||
2017-07-07,147213,4.0,6.0,0.0
|
||||
2017-07-08,92348,5.0,6.0,0.0
|
||||
2017-07-09,97018,6.0,6.0,0.0
|
||||
2017-07-10,157192,0.0,6.0,0.0
|
||||
2017-07-11,161819,1.0,6.0,0.0
|
||||
2017-07-12,161998,2.0,6.0,0.0
|
||||
2017-07-13,160280,3.0,6.0,0.0
|
||||
2017-07-14,146818,4.0,6.0,0.0
|
||||
2017-07-15,93041,5.0,6.0,0.0
|
||||
2017-07-16,97505,6.0,6.0,0.0
|
||||
2017-07-17,156167,0.0,6.0,0.0
|
||||
2017-07-18,162855,1.0,6.0,0.0
|
||||
2017-07-19,162519,2.0,6.0,0.0
|
||||
2017-07-20,159941,3.0,6.0,0.0
|
||||
2017-07-21,148460,4.0,6.0,0.0
|
||||
2017-07-22,93431,5.0,6.0,0.0
|
||||
2017-07-23,98553,6.0,6.0,0.0
|
||||
2017-07-24,156202,0.0,6.0,0.0
|
||||
2017-07-25,162503,1.0,6.0,0.0
|
||||
2017-07-26,158479,2.0,6.0,0.0
|
||||
2017-07-27,158192,3.0,6.0,0.0
|
||||
2017-07-28,147108,4.0,6.0,0.0
|
||||
2017-07-29,93799,5.0,6.0,0.0
|
||||
2017-07-30,97920,6.0,6.0,0.0
|
||||
2017-07-31,152197,0.0,6.0,0.0
|
||||
2017-08-01,158477,1.0,7.0,0.0
|
||||
2017-08-02,159089,2.0,7.0,0.0
|
||||
2017-08-03,157182,3.0,7.0,0.0
|
||||
2017-08-04,146345,4.0,7.0,0.0
|
||||
2017-08-05,92534,5.0,7.0,0.0
|
||||
2017-08-06,97128,6.0,7.0,0.0
|
||||
2017-08-07,151359,0.0,7.0,0.0
|
||||
2017-08-08,159895,1.0,7.0,0.0
|
||||
2017-08-09,158329,2.0,7.0,0.0
|
||||
2017-08-10,155468,3.0,7.0,0.0
|
||||
2017-08-11,144914,4.0,7.0,0.0
|
||||
2017-08-12,92258,5.0,7.0,0.0
|
||||
2017-08-13,95933,6.0,7.0,0.0
|
||||
2017-08-14,147706,0.0,7.0,0.0
|
||||
2017-08-15,151115,1.0,7.0,0.0
|
||||
2017-08-16,157640,2.0,7.0,0.0
|
||||
2017-08-17,156600,3.0,7.0,0.0
|
||||
2017-08-18,146980,4.0,7.0,0.0
|
||||
2017-08-19,94592,5.0,7.0,0.0
|
||||
2017-08-20,99320,6.0,7.0,0.0
|
||||
2017-08-21,145727,0.0,7.0,0.0
|
||||
2017-08-22,160260,1.0,7.0,0.0
|
||||
2017-08-23,160440,2.0,7.0,0.0
|
||||
2017-08-24,157830,3.0,7.0,0.0
|
||||
2017-08-25,145822,4.0,7.0,0.0
|
||||
2017-08-26,94706,5.0,7.0,0.0
|
||||
2017-08-27,99047,6.0,7.0,0.0
|
||||
2017-08-28,152112,0.0,7.0,0.0
|
||||
2017-08-29,162440,1.0,7.0,0.0
|
||||
2017-08-30,162902,2.0,7.0,0.0
|
||||
2017-08-31,159498,3.0,7.0,0.0
|
||||
2017-09-01,145689,4.0,8.0,0.0
|
||||
2017-09-02,93589,5.0,8.0,0.0
|
||||
2017-09-03,100058,6.0,8.0,0.0
|
||||
2017-09-04,140865,0.0,8.0,1.0
|
||||
2017-09-05,165715,1.0,8.0,0.0
|
||||
2017-09-06,167463,2.0,8.0,0.0
|
||||
2017-09-07,164811,3.0,8.0,0.0
|
||||
2017-09-08,156157,4.0,8.0,0.0
|
||||
2017-09-09,101358,5.0,8.0,0.0
|
||||
2017-09-10,107915,6.0,8.0,0.0
|
||||
2017-09-11,167845,0.0,8.0,0.0
|
||||
2017-09-12,172756,1.0,8.0,0.0
|
||||
2017-09-13,172851,2.0,8.0,0.0
|
||||
2017-09-14,171675,3.0,8.0,0.0
|
||||
2017-09-15,159266,4.0,8.0,0.0
|
||||
2017-09-16,103547,5.0,8.0,0.0
|
||||
2017-09-17,110964,6.0,8.0,0.0
|
||||
2017-09-18,170976,0.0,8.0,0.0
|
||||
2017-09-19,177864,1.0,8.0,0.0
|
||||
2017-09-20,173567,2.0,8.0,0.0
|
||||
2017-09-21,172017,3.0,8.0,0.0
|
||||
2017-09-22,161357,4.0,8.0,0.0
|
||||
2017-09-23,104681,5.0,8.0,0.0
|
||||
2017-09-24,111711,6.0,8.0,0.0
|
||||
2017-09-25,173517,0.0,8.0,0.0
|
||||
2017-09-26,180049,1.0,8.0,0.0
|
||||
2017-09-27,178307,2.0,8.0,0.0
|
||||
2017-09-28,174157,3.0,8.0,0.0
|
||||
2017-09-29,161707,4.0,8.0,0.0
|
||||
2017-09-30,110536,5.0,8.0,0.0
|
||||
2017-10-01,106505,6.0,9.0,0.0
|
||||
2017-10-02,157565,0.0,9.0,0.0
|
||||
2017-10-03,164764,1.0,9.0,0.0
|
||||
2017-10-04,163383,2.0,9.0,0.0
|
||||
2017-10-05,162847,3.0,9.0,0.0
|
||||
2017-10-06,153575,4.0,9.0,0.0
|
||||
2017-10-07,107472,5.0,9.0,0.0
|
||||
2017-10-08,116127,6.0,9.0,0.0
|
||||
2017-10-09,174457,0.0,9.0,1.0
|
||||
2017-10-10,185217,1.0,9.0,0.0
|
||||
2017-10-11,185120,2.0,9.0,0.0
|
||||
2017-10-12,180844,3.0,9.0,0.0
|
||||
2017-10-13,170178,4.0,9.0,0.0
|
||||
2017-10-14,112754,5.0,9.0,0.0
|
||||
2017-10-15,121251,6.0,9.0,0.0
|
||||
2017-10-16,183906,0.0,9.0,0.0
|
||||
2017-10-17,188945,1.0,9.0,0.0
|
||||
2017-10-18,187297,2.0,9.0,0.0
|
||||
2017-10-19,183867,3.0,9.0,0.0
|
||||
2017-10-20,173021,4.0,9.0,0.0
|
||||
2017-10-21,115851,5.0,9.0,0.0
|
||||
2017-10-22,126088,6.0,9.0,0.0
|
||||
2017-10-23,189452,0.0,9.0,0.0
|
||||
2017-10-24,194412,1.0,9.0,0.0
|
||||
2017-10-25,192293,2.0,9.0,0.0
|
||||
2017-10-26,190163,3.0,9.0,0.0
|
||||
2017-10-27,177053,4.0,9.0,0.0
|
||||
2017-10-28,114934,5.0,9.0,0.0
|
||||
2017-10-29,125289,6.0,9.0,0.0
|
||||
2017-10-30,189245,0.0,9.0,0.0
|
||||
2017-10-31,191480,1.0,9.0,0.0
|
||||
2017-11-01,182281,2.0,10.0,0.0
|
||||
2017-11-02,186351,3.0,10.0,0.0
|
||||
2017-11-03,175422,4.0,10.0,0.0
|
||||
2017-11-04,118160,5.0,10.0,0.0
|
||||
2017-11-05,127602,6.0,10.0,0.0
|
||||
2017-11-06,191067,0.0,10.0,0.0
|
||||
2017-11-07,197083,1.0,10.0,0.0
|
||||
2017-11-08,194333,2.0,10.0,0.0
|
||||
2017-11-09,193914,3.0,10.0,0.0
|
||||
2017-11-10,179933,4.0,10.0,1.0
|
||||
2017-11-11,121346,5.0,10.0,0.0
|
||||
2017-11-12,131900,6.0,10.0,0.0
|
||||
2017-11-13,196969,0.0,10.0,0.0
|
||||
2017-11-14,201949,1.0,10.0,0.0
|
||||
2017-11-15,198424,2.0,10.0,0.0
|
||||
2017-11-16,196902,3.0,10.0,0.0
|
||||
2017-11-17,183893,4.0,10.0,0.0
|
||||
2017-11-18,122767,5.0,10.0,0.0
|
||||
2017-11-19,130890,6.0,10.0,0.0
|
||||
2017-11-20,194515,0.0,10.0,0.0
|
||||
2017-11-21,198601,1.0,10.0,0.0
|
||||
2017-11-22,191041,2.0,10.0,0.0
|
||||
2017-11-23,170321,3.0,10.0,1.0
|
||||
2017-11-24,155623,4.0,10.0,0.0
|
||||
2017-11-25,115759,5.0,10.0,0.0
|
||||
2017-11-26,128771,6.0,10.0,0.0
|
||||
2017-11-27,199419,0.0,10.0,0.0
|
||||
2017-11-28,207253,1.0,10.0,0.0
|
||||
2017-11-29,205406,2.0,10.0,0.0
|
||||
2017-11-30,200674,3.0,10.0,0.0
|
||||
2017-12-01,187017,4.0,11.0,0.0
|
||||
2017-12-02,129735,5.0,11.0,0.0
|
||||
2017-12-03,139120,6.0,11.0,0.0
|
||||
2017-12-04,205505,0.0,11.0,0.0
|
||||
2017-12-05,208218,1.0,11.0,0.0
|
||||
2017-12-06,202480,2.0,11.0,0.0
|
||||
2017-12-07,197822,3.0,11.0,0.0
|
||||
2017-12-08,180686,4.0,11.0,0.0
|
||||
2017-12-09,123667,5.0,11.0,0.0
|
||||
2017-12-10,130987,6.0,11.0,0.0
|
||||
2017-12-11,193901,0.0,11.0,0.0
|
||||
2017-12-12,194997,1.0,11.0,0.0
|
||||
2017-12-13,192063,2.0,11.0,0.0
|
||||
2017-12-14,186496,3.0,11.0,0.0
|
||||
2017-12-15,170812,4.0,11.0,0.0
|
||||
2017-12-16,110474,5.0,11.0,0.0
|
||||
2017-12-17,118165,6.0,11.0,0.0
|
||||
2017-12-18,176843,0.0,11.0,0.0
|
||||
2017-12-19,179550,1.0,11.0,0.0
|
||||
2017-12-20,173506,2.0,11.0,0.0
|
||||
2017-12-21,165910,3.0,11.0,0.0
|
||||
2017-12-22,145886,4.0,11.0,0.0
|
||||
2017-12-23,95246,5.0,11.0,0.0
|
||||
2017-12-24,88781,6.0,11.0,0.0
|
||||
2017-12-25,98189,0.0,11.0,1.0
|
||||
2017-12-26,121383,1.0,11.0,0.0
|
||||
2017-12-27,135300,2.0,11.0,0.0
|
||||
2017-12-28,136827,3.0,11.0,0.0
|
||||
2017-12-29,127700,4.0,11.0,0.0
|
||||
2017-12-30,93014,5.0,11.0,0.0
|
||||
2017-12-31,82878,6.0,11.0,0.0
|
||||
2018-01-01,86419,0.0,0.0,1.0
|
||||
2018-01-02,147428,1.0,0.0,0.0
|
||||
2018-01-03,162193,2.0,0.0,0.0
|
||||
2018-01-04,163784,3.0,0.0,0.0
|
||||
2018-01-05,158606,4.0,0.0,0.0
|
||||
2018-01-06,113467,5.0,0.0,0.0
|
||||
2018-01-07,118313,6.0,0.0,0.0
|
||||
2018-01-08,175623,0.0,0.0,0.0
|
||||
2018-01-09,183880,1.0,0.0,0.0
|
||||
2018-01-10,183945,2.0,0.0,0.0
|
||||
2018-01-11,181769,3.0,0.0,0.0
|
||||
2018-01-12,170552,4.0,0.0,0.0
|
||||
2018-01-13,115707,5.0,0.0,0.0
|
||||
2018-01-14,121191,6.0,0.0,0.0
|
||||
2018-01-15,176127,0.0,0.0,1.0
|
||||
2018-01-16,188032,1.0,0.0,0.0
|
||||
2018-01-17,189871,2.0,0.0,0.0
|
||||
2018-01-18,189348,3.0,0.0,0.0
|
||||
2018-01-19,177456,4.0,0.0,0.0
|
||||
2018-01-20,123321,5.0,0.0,0.0
|
||||
2018-01-21,128306,6.0,0.0,0.0
|
||||
2018-01-22,186132,0.0,0.0,0.0
|
||||
2018-01-23,197618,1.0,0.0,0.0
|
||||
2018-01-24,196402,2.0,0.0,0.0
|
||||
2018-01-25,192722,3.0,0.0,0.0
|
||||
2018-01-26,179415,4.0,0.0,0.0
|
||||
2018-01-27,125769,5.0,0.0,0.0
|
||||
2018-01-28,133306,6.0,0.0,0.0
|
||||
2018-01-29,194151,0.0,0.0,0.0
|
||||
2018-01-30,198680,1.0,0.0,0.0
|
||||
2018-01-31,198652,2.0,0.0,0.0
|
||||
2018-02-01,195472,3.0,1.0,0.0
|
||||
2018-02-02,183173,4.0,1.0,0.0
|
||||
2018-02-03,124276,5.0,1.0,0.0
|
||||
2018-02-04,129054,6.0,1.0,0.0
|
||||
2018-02-05,190024,0.0,1.0,0.0
|
||||
2018-02-06,198658,1.0,1.0,0.0
|
||||
2018-02-07,198272,2.0,1.0,0.0
|
||||
2018-02-08,195339,3.0,1.0,0.0
|
||||
2018-02-09,183086,4.0,1.0,0.0
|
||||
2018-02-10,122536,5.0,1.0,0.0
|
||||
2018-02-11,133033,6.0,1.0,0.0
|
||||
2018-02-12,185386,0.0,1.0,0.0
|
||||
2018-02-13,184789,1.0,1.0,0.0
|
||||
2018-02-14,176089,2.0,1.0,0.0
|
||||
2018-02-15,171317,3.0,1.0,0.0
|
||||
2018-02-16,162693,4.0,1.0,0.0
|
||||
2018-02-17,116342,5.0,1.0,0.0
|
||||
2018-02-18,122466,6.0,1.0,0.0
|
||||
2018-02-19,172364,0.0,1.0,1.0
|
||||
2018-02-20,185896,1.0,1.0,0.0
|
||||
2018-02-21,188166,2.0,1.0,0.0
|
||||
2018-02-22,189427,3.0,1.0,0.0
|
||||
2018-02-23,178732,4.0,1.0,0.0
|
||||
2018-02-24,132664,5.0,1.0,0.0
|
||||
2018-02-25,134008,6.0,1.0,0.0
|
||||
2018-02-26,200075,0.0,1.0,0.0
|
||||
2018-02-27,207996,1.0,1.0,0.0
|
||||
2018-02-28,204416,2.0,1.0,0.0
|
||||
2018-03-01,201320,3.0,2.0,0.0
|
||||
2018-03-02,188205,4.0,2.0,0.0
|
||||
2018-03-03,131162,5.0,2.0,0.0
|
||||
2018-03-04,138320,6.0,2.0,0.0
|
||||
2018-03-05,207326,0.0,2.0,0.0
|
||||
2018-03-06,212462,1.0,2.0,0.0
|
||||
2018-03-07,209357,2.0,2.0,0.0
|
||||
2018-03-08,194876,3.0,2.0,0.0
|
||||
2018-03-09,193761,4.0,2.0,0.0
|
||||
2018-03-10,133449,5.0,2.0,0.0
|
||||
2018-03-11,142258,6.0,2.0,0.0
|
||||
2018-03-12,208753,0.0,2.0,0.0
|
||||
2018-03-13,210602,1.0,2.0,0.0
|
||||
2018-03-14,214236,2.0,2.0,0.0
|
||||
2018-03-15,210761,3.0,2.0,0.0
|
||||
2018-03-16,196619,4.0,2.0,0.0
|
||||
2018-03-17,133056,5.0,2.0,0.0
|
||||
2018-03-18,141335,6.0,2.0,0.0
|
||||
2018-03-19,211580,0.0,2.0,0.0
|
||||
2018-03-20,219051,1.0,2.0,0.0
|
||||
2018-03-21,215435,2.0,2.0,0.0
|
||||
2018-03-22,211961,3.0,2.0,0.0
|
||||
2018-03-23,196009,4.0,2.0,0.0
|
||||
2018-03-24,132390,5.0,2.0,0.0
|
||||
2018-03-25,140021,6.0,2.0,0.0
|
||||
2018-03-26,205273,0.0,2.0,0.0
|
||||
2018-03-27,212686,1.0,2.0,0.0
|
||||
2018-03-28,210683,2.0,2.0,0.0
|
||||
2018-03-29,189044,3.0,2.0,0.0
|
||||
2018-03-30,170256,4.0,2.0,0.0
|
||||
2018-03-31,125999,5.0,2.0,0.0
|
||||
2018-04-01,126749,6.0,3.0,0.0
|
||||
2018-04-02,186546,0.0,3.0,0.0
|
||||
2018-04-03,207905,1.0,3.0,0.0
|
||||
2018-04-04,201528,2.0,3.0,0.0
|
||||
2018-04-05,188580,3.0,3.0,0.0
|
||||
2018-04-06,173714,4.0,3.0,0.0
|
||||
2018-04-07,125723,5.0,3.0,0.0
|
||||
2018-04-08,142545,6.0,3.0,0.0
|
||||
2018-04-09,204767,0.0,3.0,0.0
|
||||
2018-04-10,212048,1.0,3.0,0.0
|
||||
2018-04-11,210517,2.0,3.0,0.0
|
||||
2018-04-12,206924,3.0,3.0,0.0
|
||||
2018-04-13,191679,4.0,3.0,0.0
|
||||
2018-04-14,126394,5.0,3.0,0.0
|
||||
2018-04-15,137279,6.0,3.0,0.0
|
||||
2018-04-16,208085,0.0,3.0,0.0
|
||||
2018-04-17,213273,1.0,3.0,0.0
|
||||
2018-04-18,211580,2.0,3.0,0.0
|
||||
2018-04-19,206037,3.0,3.0,0.0
|
||||
2018-04-20,191211,4.0,3.0,0.0
|
||||
2018-04-21,125564,5.0,3.0,0.0
|
||||
2018-04-22,136469,6.0,3.0,0.0
|
||||
2018-04-23,206288,0.0,3.0,0.0
|
||||
2018-04-24,212115,1.0,3.0,0.0
|
||||
2018-04-25,207948,2.0,3.0,0.0
|
||||
2018-04-26,205759,3.0,3.0,0.0
|
||||
2018-04-27,181330,4.0,3.0,0.0
|
||||
2018-04-28,130046,5.0,3.0,0.0
|
||||
2018-04-29,120802,6.0,3.0,0.0
|
||||
2018-04-30,170390,0.0,3.0,0.0
|
||||
2018-05-01,169054,1.0,4.0,0.0
|
||||
2018-05-02,197891,2.0,4.0,0.0
|
||||
2018-05-03,199820,3.0,4.0,0.0
|
||||
2018-05-04,186783,4.0,4.0,0.0
|
||||
2018-05-05,124420,5.0,4.0,0.0
|
||||
2018-05-06,130666,6.0,4.0,0.0
|
||||
2018-05-07,196014,0.0,4.0,0.0
|
||||
2018-05-08,203058,1.0,4.0,0.0
|
||||
2018-05-09,198582,2.0,4.0,0.0
|
||||
2018-05-10,191321,3.0,4.0,0.0
|
||||
2018-05-11,183639,4.0,4.0,0.0
|
||||
2018-05-12,122023,5.0,4.0,0.0
|
||||
2018-05-13,128775,6.0,4.0,0.0
|
||||
2018-05-14,199104,0.0,4.0,0.0
|
||||
2018-05-15,200658,1.0,4.0,0.0
|
||||
2018-05-16,201541,2.0,4.0,0.0
|
||||
2018-05-17,196886,3.0,4.0,0.0
|
||||
2018-05-18,188597,4.0,4.0,0.0
|
||||
2018-05-19,121392,5.0,4.0,0.0
|
||||
2018-05-20,126981,6.0,4.0,0.0
|
||||
2018-05-21,189291,0.0,4.0,0.0
|
||||
2018-05-22,203038,1.0,4.0,0.0
|
||||
2018-05-23,205330,2.0,4.0,0.0
|
||||
2018-05-24,199208,3.0,4.0,0.0
|
||||
2018-05-25,187768,4.0,4.0,0.0
|
||||
2018-05-26,117635,5.0,4.0,0.0
|
||||
2018-05-27,124352,6.0,4.0,0.0
|
||||
2018-05-28,180398,0.0,4.0,1.0
|
||||
2018-05-29,194170,1.0,4.0,0.0
|
||||
2018-05-30,200281,2.0,4.0,0.0
|
||||
2018-05-31,197244,3.0,4.0,0.0
|
||||
2018-06-01,184037,4.0,5.0,0.0
|
||||
2018-06-02,121135,5.0,5.0,0.0
|
||||
2018-06-03,129389,6.0,5.0,0.0
|
||||
2018-06-04,200331,0.0,5.0,0.0
|
||||
2018-06-05,207735,1.0,5.0,0.0
|
||||
2018-06-06,203354,2.0,5.0,0.0
|
||||
2018-06-07,200520,3.0,5.0,0.0
|
||||
2018-06-08,182038,4.0,5.0,0.0
|
||||
2018-06-09,120164,5.0,5.0,0.0
|
||||
2018-06-10,125256,6.0,5.0,0.0
|
||||
2018-06-11,194786,0.0,5.0,0.0
|
||||
2018-06-12,200815,1.0,5.0,0.0
|
||||
2018-06-13,197740,2.0,5.0,0.0
|
||||
2018-06-14,192294,3.0,5.0,0.0
|
||||
2018-06-15,173587,4.0,5.0,0.0
|
||||
2018-06-16,105955,5.0,5.0,0.0
|
||||
2018-06-17,110780,6.0,5.0,0.0
|
||||
2018-06-18,174582,0.0,5.0,0.0
|
||||
2018-06-19,193310,1.0,5.0,0.0
|
||||
2018-06-20,193062,2.0,5.0,0.0
|
||||
2018-06-21,187986,3.0,5.0,0.0
|
||||
2018-06-22,173606,4.0,5.0,0.0
|
||||
2018-06-23,111795,5.0,5.0,0.0
|
||||
2018-06-24,116134,6.0,5.0,0.0
|
||||
2018-06-25,185919,0.0,5.0,0.0
|
||||
2018-06-26,193142,1.0,5.0,0.0
|
||||
2018-06-27,188114,2.0,5.0,0.0
|
||||
2018-06-28,183737,3.0,5.0,0.0
|
||||
2018-06-29,171496,4.0,5.0,0.0
|
||||
2018-06-30,107210,5.0,5.0,0.0
|
||||
2018-07-01,111053,6.0,6.0,0.0
|
||||
2018-07-02,176198,0.0,6.0,0.0
|
||||
2018-07-03,184040,1.0,6.0,0.0
|
||||
2018-07-04,169783,2.0,6.0,1.0
|
||||
2018-07-05,177996,3.0,6.0,0.0
|
||||
2018-07-06,167378,4.0,6.0,0.0
|
||||
2018-07-07,106401,5.0,6.0,0.0
|
||||
2018-07-08,112327,6.0,6.0,0.0
|
||||
2018-07-09,182835,0.0,6.0,0.0
|
||||
2018-07-10,187694,1.0,6.0,0.0
|
||||
2018-07-11,185762,2.0,6.0,0.0
|
||||
2018-07-12,184099,3.0,6.0,0.0
|
||||
2018-07-13,170860,4.0,6.0,0.0
|
||||
2018-07-14,106799,5.0,6.0,0.0
|
||||
2018-07-15,108475,6.0,6.0,0.0
|
||||
2018-07-16,175704,0.0,6.0,0.0
|
||||
2018-07-17,183596,1.0,6.0,0.0
|
||||
2018-07-18,179897,2.0,6.0,0.0
|
||||
2018-07-19,183373,3.0,6.0,0.0
|
||||
2018-07-20,169626,4.0,6.0,0.0
|
||||
2018-07-21,106785,5.0,6.0,0.0
|
||||
2018-07-22,112387,6.0,6.0,0.0
|
||||
2018-07-23,180572,0.0,6.0,0.0
|
||||
2018-07-24,186943,1.0,6.0,0.0
|
||||
2018-07-25,185744,2.0,6.0,0.0
|
||||
2018-07-26,183117,3.0,6.0,0.0
|
||||
2018-07-27,168526,4.0,6.0,0.0
|
||||
2018-07-28,105936,5.0,6.0,0.0
|
||||
2018-07-29,111708,6.0,6.0,0.0
|
||||
2018-07-30,179950,0.0,6.0,0.0
|
||||
2018-07-31,185930,1.0,6.0,0.0
|
||||
2018-08-01,183366,2.0,7.0,0.0
|
||||
2018-08-02,182412,3.0,7.0,0.0
|
||||
2018-08-03,173429,4.0,7.0,0.0
|
||||
2018-08-04,106108,5.0,7.0,0.0
|
||||
2018-08-05,110059,6.0,7.0,0.0
|
||||
2018-08-06,178355,0.0,7.0,0.0
|
||||
2018-08-07,185518,1.0,7.0,0.0
|
||||
2018-08-08,183204,2.0,7.0,0.0
|
||||
2018-08-09,181276,3.0,7.0,0.0
|
||||
2018-08-10,168297,4.0,7.0,0.0
|
||||
2018-08-11,106488,5.0,7.0,0.0
|
||||
2018-08-12,111786,6.0,7.0,0.0
|
||||
2018-08-13,178620,0.0,7.0,0.0
|
||||
2018-08-14,181922,1.0,7.0,0.0
|
||||
2018-08-15,172198,2.0,7.0,0.0
|
||||
2018-08-16,177367,3.0,7.0,0.0
|
||||
2018-08-17,166550,4.0,7.0,0.0
|
||||
2018-08-18,107011,5.0,7.0,0.0
|
||||
2018-08-19,112299,6.0,7.0,0.0
|
||||
2018-08-20,176718,0.0,7.0,0.0
|
||||
2018-08-21,182562,1.0,7.0,0.0
|
||||
2018-08-22,181484,2.0,7.0,0.0
|
||||
2018-08-23,180317,3.0,7.0,0.0
|
||||
2018-08-24,170197,4.0,7.0,0.0
|
||||
2018-08-25,109383,5.0,7.0,0.0
|
||||
2018-08-26,113373,6.0,7.0,0.0
|
||||
2018-08-27,180142,0.0,7.0,0.0
|
||||
2018-08-28,191628,1.0,7.0,0.0
|
||||
2018-08-29,191149,2.0,7.0,0.0
|
||||
2018-08-30,187503,3.0,7.0,0.0
|
||||
2018-08-31,172280,4.0,7.0,0.0
|
||||
|
@@ -0,0 +1,176 @@
|
||||
import pandas as pd
|
||||
from azureml.core import Environment
|
||||
from azureml.core.conda_dependencies import CondaDependencies
|
||||
from azureml.train.estimator import Estimator
|
||||
from azureml.core.run import Run
|
||||
from azureml.automl.core.shared import constants
|
||||
|
||||
|
||||
def split_fraction_by_grain(df, fraction, time_column_name, grain_column_names=None):
    """Group df by grain and split on last n rows for each group."""
|
||||
if not grain_column_names:
|
||||
df["tmp_grain_column"] = "grain"
|
||||
grain_column_names = ["tmp_grain_column"]
|
||||
|
||||
"""Group df by grain and split on last n rows for each group."""
|
||||
df_grouped = df.sort_values(time_column_name).groupby(
|
||||
grain_column_names, group_keys=False
|
||||
)
|
||||
|
||||
df_head = df_grouped.apply(
|
||||
lambda dfg: dfg.iloc[: -int(len(dfg) * fraction)] if fraction > 0 else dfg
|
||||
)
|
||||
|
||||
df_tail = df_grouped.apply(
|
||||
lambda dfg: dfg.iloc[-int(len(dfg) * fraction) :] if fraction > 0 else dfg[:0]
|
||||
)
|
||||
|
||||
if "tmp_grain_column" in grain_column_names:
|
||||
for df2 in (df, df_head, df_tail):
|
||||
df2.drop("tmp_grain_column", axis=1, inplace=True)
|
||||
|
||||
grain_column_names.remove("tmp_grain_column")
|
||||
|
||||
return df_head, df_tail
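# Illustrative usage (not called in this file): with fraction=0.2 and daily
# data, the most recent 20% of rows in each grain land in the tail split, e.g.
#   train_part, test_part = split_fraction_by_grain(df, 0.2, "date")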
|
||||
|
||||
|
||||
def split_full_for_forecasting(
|
||||
df, time_column_name, grain_column_names=None, test_split=0.2
|
||||
):
|
||||
index_name = df.index.name
|
||||
|
||||
# Assumes that there isn't already a column called tmpindex
|
||||
|
||||
df["tmpindex"] = df.index
|
||||
|
||||
train_df, test_df = split_fraction_by_grain(
|
||||
df, test_split, time_column_name, grain_column_names
|
||||
)
|
||||
|
||||
train_df = train_df.set_index("tmpindex")
|
||||
train_df.index.name = index_name
|
||||
|
||||
test_df = test_df.set_index("tmpindex")
|
||||
test_df.index.name = index_name
|
||||
|
||||
df.drop("tmpindex", axis=1, inplace=True)
|
||||
|
||||
return train_df, test_df
|
||||
|
||||
|
||||
def get_result_df(remote_run):
|
||||
children = list(remote_run.get_children(recursive=True))
|
||||
summary_df = pd.DataFrame(
|
||||
index=["run_id", "run_algorithm", "primary_metric", "Score"]
|
||||
)
|
||||
goal_minimize = False
|
||||
for run in children:
|
||||
if (
|
||||
run.get_status().lower() == constants.RunState.COMPLETE_RUN
|
||||
and "run_algorithm" in run.properties
|
||||
and "score" in run.properties
|
||||
):
|
||||
# We only count in the completed child runs.
|
||||
summary_df[run.id] = [
|
||||
run.id,
|
||||
run.properties["run_algorithm"],
|
||||
run.properties["primary_metric"],
|
||||
float(run.properties["score"]),
|
||||
]
|
||||
if "goal" in run.properties:
|
||||
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
|
||||
|
||||
summary_df = summary_df.T.sort_values("Score", ascending=goal_minimize)
|
||||
summary_df = summary_df.set_index("run_algorithm")
|
||||
return summary_df
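# Illustrative usage (not called in this file): the returned frame is indexed
# by algorithm name and sorted by "Score" respecting the metric goal, so
#   summary_df = get_result_df(remote_run)
#   best_algorithm = summary_df.index[0]  # top-ranked algorithm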
|
||||
|
||||
|
||||
def run_inference(
|
||||
test_experiment,
|
||||
compute_target,
|
||||
script_folder,
|
||||
train_run,
|
||||
test_dataset,
|
||||
lookback_dataset,
|
||||
max_horizon,
|
||||
target_column_name,
|
||||
time_column_name,
|
||||
freq,
|
||||
):
|
||||
model_base_name = "model.pkl"
|
||||
if "model_data_location" in train_run.properties:
|
||||
model_location = train_run.properties["model_data_location"]
|
||||
_, model_base_name = model_location.rsplit("/", 1)
|
||||
train_run.download_file(
|
||||
"outputs/{}".format(model_base_name), "inference/{}".format(model_base_name)
|
||||
)
|
||||
|
||||
inference_env = train_run.get_environment()
|
||||
|
||||
est = Estimator(
|
||||
source_directory=script_folder,
|
||||
entry_script="infer.py",
|
||||
script_params={
|
||||
"--max_horizon": max_horizon,
|
||||
"--target_column_name": target_column_name,
|
||||
"--time_column_name": time_column_name,
|
||||
"--frequency": freq,
|
||||
"--model_path": model_base_name,
|
||||
},
|
||||
inputs=[
|
||||
test_dataset.as_named_input("test_data"),
|
||||
lookback_dataset.as_named_input("lookback_data"),
|
||||
],
|
||||
compute_target=compute_target,
|
||||
environment_definition=inference_env,
|
||||
)
|
||||
|
||||
run = test_experiment.submit(
|
||||
est,
|
||||
tags={
|
||||
"training_run_id": train_run.id,
|
||||
"run_algorithm": train_run.properties["run_algorithm"],
|
||||
"valid_score": train_run.properties["score"],
|
||||
"primary_metric": train_run.properties["primary_metric"],
|
||||
},
|
||||
)
|
||||
|
||||
run.log("run_algorithm", run.tags["run_algorithm"])
|
||||
return run
|
||||
|
||||
|
||||
def run_multiple_inferences(
|
||||
summary_df,
|
||||
train_experiment,
|
||||
test_experiment,
|
||||
compute_target,
|
||||
script_folder,
|
||||
test_dataset,
|
||||
lookback_dataset,
|
||||
max_horizon,
|
||||
target_column_name,
|
||||
time_column_name,
|
||||
freq,
|
||||
):
|
||||
for run_name, run_summary in summary_df.iterrows():
|
||||
print(run_name)
|
||||
print(run_summary)
|
||||
run_id = run_summary.run_id
|
||||
train_run = Run(train_experiment, run_id)
|
||||
|
||||
test_run = run_inference(
|
||||
test_experiment,
|
||||
compute_target,
|
||||
script_folder,
|
||||
train_run,
|
||||
test_dataset,
|
||||
lookback_dataset,
|
||||
max_horizon,
|
||||
target_column_name,
|
||||
time_column_name,
|
||||
freq,
|
||||
)
|
||||
|
||||
print(test_run)
|
||||
summary_df.loc[summary_df.run_id == run_id, "test_run_id"] = test_run.id
|
||||
|
||||
return summary_df
|
||||
@@ -0,0 +1,145 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
import joblib
|
||||
from sklearn.metrics import mean_absolute_error, mean_squared_error
|
||||
|
||||
from azureml.automl.runtime.shared.score import scoring, constants
|
||||
from azureml.core import Run
|
||||
|
||||
try:
|
||||
import torch
|
||||
|
||||
_torch_present = True
|
||||
except ImportError:
|
||||
_torch_present = False
|
||||
|
||||
|
||||
def map_location_cuda(storage, loc):
|
||||
return storage.cuda()
|
||||
|
||||
|
||||
def APE(actual, pred):
|
||||
"""
|
||||
Calculate absolute percentage error.
|
||||
Returns a vector of APE values with same length as actual/pred.
|
||||
"""
|
||||
return 100 * np.abs((actual - pred) / actual)
|
||||
|
||||
|
||||
def MAPE(actual, pred):
|
||||
"""
|
||||
Calculate mean absolute percentage error.
|
||||
Remove NA values and entries where actual is close to zero.
|
||||
"""
|
||||
not_na = ~(np.isnan(actual) | np.isnan(pred))
|
||||
not_zero = ~np.isclose(actual, 0.0)
|
||||
actual_safe = actual[not_na & not_zero]
|
||||
pred_safe = pred[not_na & not_zero]
|
||||
return np.mean(APE(actual_safe, pred_safe))
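# Quick sanity check (illustrative): for actual=[100.0, 200.0] and
# pred=[110.0, 180.0] the APEs are 10% and 10%, so MAPE returns 10.0.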
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--max_horizon",
|
||||
type=int,
|
||||
dest="max_horizon",
|
||||
default=10,
|
||||
help="Max Horizon for forecasting",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--target_column_name",
|
||||
type=str,
|
||||
dest="target_column_name",
|
||||
help="Target Column Name",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--time_column_name", type=str, dest="time_column_name", help="Time Column Name"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--frequency", type=str, dest="freq", help="Frequency of prediction"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--model_path",
|
||||
type=str,
|
||||
dest="model_path",
|
||||
default="model.pkl",
|
||||
help="Filename of model to be loaded",
|
||||
)
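# Example invocation (illustrative values matching the notebook's settings):
#   python infer.py --max_horizon 14 --target_column_name count \
#       --time_column_name date --frequency D --model_path model.pkl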
|
||||
|
||||
args = parser.parse_args()
|
||||
max_horizon = args.max_horizon
|
||||
target_column_name = args.target_column_name
|
||||
time_column_name = args.time_column_name
|
||||
freq = args.freq
|
||||
model_path = args.model_path
|
||||
|
||||
print("args passed are: ")
|
||||
print(max_horizon)
|
||||
print(target_column_name)
|
||||
print(time_column_name)
|
||||
print(freq)
|
||||
print(model_path)
|
||||
|
||||
run = Run.get_context()
|
||||
# get input dataset by name
|
||||
test_dataset = run.input_datasets["test_data"]
|
||||
|
||||
grain_column_names = []
|
||||
|
||||
df = test_dataset.to_pandas_dataframe()
|
||||
|
||||
print("Read df")
|
||||
print(df)
|
||||
|
||||
X_test_df = df
|
||||
y_test = df.pop(target_column_name).to_numpy()
|
||||
|
||||
_, ext = os.path.splitext(model_path)
|
||||
if ext == ".pt":
|
||||
# Load the fc-tcn torch model.
|
||||
assert _torch_present
|
||||
if torch.cuda.is_available():
|
||||
map_location = map_location_cuda
|
||||
else:
|
||||
map_location = "cpu"
|
||||
with open(model_path, "rb") as fh:
|
||||
fitted_model = torch.load(fh, map_location=map_location)
|
||||
else:
|
||||
# Load the sklearn pipeline.
|
||||
fitted_model = joblib.load(model_path)
|
||||
|
||||
X_rf = fitted_model.rolling_forecast(X_test_df, y_test, step=1)
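# Note (assumed behavior of rolling_forecast): the model predicts up to its
# horizon, then advances the forecast origin by `step` rows using observed
# actuals as context, so the whole test set is scored at a fixed horizon.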
|
||||
assign_dict = {
|
||||
fitted_model.forecast_origin_column_name: "forecast_origin",
|
||||
fitted_model.forecast_column_name: "predicted",
|
||||
fitted_model.actual_column_name: target_column_name,
|
||||
}
|
||||
X_rf.rename(columns=assign_dict, inplace=True)
|
||||
|
||||
print(X_rf.head())
|
||||
|
||||
# Use the AutoML scoring module
|
||||
regression_metrics = list(constants.REGRESSION_SCALAR_SET)
|
||||
y_test = np.array(X_rf[target_column_name])
|
||||
y_pred = np.array(X_rf["predicted"])
|
||||
scores = scoring.score_regression(y_test, y_pred, regression_metrics)
|
||||
|
||||
print("scores:")
|
||||
print(scores)
|
||||
|
||||
for key, value in scores.items():
|
||||
run.log(key, value)
|
||||
|
||||
print("Simple forecasting model")
|
||||
rmse = np.sqrt(mean_squared_error(X_rf[target_column_name], X_rf["predicted"]))
|
||||
print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
|
||||
mae = mean_absolute_error(X_rf[target_column_name], X_rf["predicted"])
|
||||
print("mean_absolute_error score: %.2f" % mae)
|
||||
print("MAPE: %.2f" % MAPE(X_rf[target_column_name], X_rf["predicted"]))
|
||||
|
||||
run.log("rmse", rmse)
|
||||
run.log("mae", mae)
|
||||
@@ -0,0 +1,681 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<font color=\"red\" size=\"5\"><strong>!Important!</strong> </br>This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/pipelines/1k_demand_forecasting_with_pipeline_components/automl-forecasting-demand-hierarchical-timeseries-in-pipeline)).</font>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Hierarchical Time Series - Automated ML\n",
|
||||
"**_Generate hierarchical time series forecasts with Automated Machine Learning_**\n",
|
||||
"\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For this notebook we are using a synthetic dataset portraying sales data to predict the the quantity of a vartiety of product skus across several states, stores, and product categories.\n",
|
||||
"\n",
|
||||
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prerequisites\n",
|
||||
"You'll need to create a compute Instance by following [these](https://learn.microsoft.com/en-us/azure/machine-learning/v1/how-to-create-manage-compute-instance?tabs=python) instructions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1.0 Set up workspace, datastore, experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613003526897
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace, Datastore\n",
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"# Set up your workspace\n",
|
||||
"ws = Workspace.from_config()\n",
|
||||
"ws.get_details()\n",
|
||||
"\n",
|
||||
"# Set up your datastores\n",
|
||||
"dstore = ws.get_default_datastore()\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output[\"SDK version\"] = azureml.core.VERSION\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"Default datastore name\"] = dstore.name\n",
|
||||
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Choose an experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613003540729
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Experiment\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, \"automl-hts\")\n",
|
||||
"\n",
|
||||
"print(\"Experiment name: \" + experiment.name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2.0 Data\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"### Upload local csv files to datastore\n",
|
||||
"You can upload your train and inference csv files to the default datastore in your workspace. \n",
|
||||
"\n",
|
||||
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
|
||||
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore.datastore?view=azure-ml-py) documentation on how to access data from Datastore."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"datastore_path = \"hts-sample\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"datastore = ws.get_default_datastore()\n",
|
||||
"datastore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create the TabularDatasets \n",
|
||||
"\n",
|
||||
"Datasets in Azure Machine Learning are references to specific data in a Datastore. The data can be retrieved as a [TabularDatasets](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py). We will read in the data as a pandas DataFrame, upload to the data store and register them to your Workspace using ```register_pandas_dataframe``` so they can be called as an input into the training pipeline. We will use the inference dataset as part of the forecasting pipeline. The step need only be completed once."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007017296
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||
"\n",
|
||||
"registered_train = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||
" pd.read_csv(\"Data/hts-sample-train.csv\"),\n",
|
||||
" target=(datastore, \"hts-sample\"),\n",
|
||||
" name=\"hts-sales-train\",\n",
|
||||
")\n",
|
||||
"registered_inference = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||
" pd.read_csv(\"Data/hts-sample-test.csv\"),\n",
|
||||
" target=(datastore, \"hts-sample\"),\n",
|
||||
" name=\"hts-sales-test\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3.0 Build the training pipeline\n",
|
||||
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Choose a compute target\n",
|
||||
"\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
|
||||
"\n",
|
||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007037308
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"\n",
|
||||
"# Name your cluster\n",
|
||||
"compute_name = \"hts-compute\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"if compute_name in ws.compute_targets:\n",
|
||||
" compute_target = ws.compute_targets[compute_name]\n",
|
||||
" if compute_target and type(compute_target) is AmlCompute:\n",
|
||||
" print(\"Found compute target: \" + compute_name)\n",
|
||||
"else:\n",
|
||||
" print(\"Creating a new compute target...\")\n",
|
||||
" provisioning_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n",
|
||||
" )\n",
|
||||
" # Create the compute target\n",
|
||||
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
|
||||
"\n",
|
||||
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
|
||||
" # If no min node count is provided it will use the scale settings for the cluster\n",
|
||||
" compute_target.wait_for_completion(\n",
|
||||
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # For a more detailed view of current cluster status, use the 'status' property\n",
|
||||
" print(compute_target.status.serialize())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up training parameters\n",
|
||||
"\n",
|
||||
"We need to provide ``ForecastingParameters``, ``AutoMLConfig`` and ``HTSTrainParameters`` objects. For the forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, the hierarchy definition, and the level of the hierarchy at which to train.\n",
|
||||
"\n",
|
||||
"#### ``ForecastingParameters`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
||||
"| **time_column_name** | The name of your time column. |\n",
|
||||
"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
|
||||
"| **cv_step_size** | Number of periods between two consecutive cross-validation folds. The default value is \\\"auto\\\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value. |\n",
|
||||
"\n",
|
||||
"#### ``AutoMLConfig`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **task** | forecasting |\n",
|
||||
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
|
||||
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
|
||||
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **experiment_timeout_hours** | Maximum amount of time in hours that each experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. **It does not control the overall timeout for the pipeline run, instead controls the timeout for each training run per partitioned time series.** |\n",
|
||||
"| **label_column_name** | The name of the label column. |\n",
|
||||
"| **n_cross_validations** | Number of cross validation splits. The default value is \\\"auto\\\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
||||
"| **enable_early_stopping** | Flag to enable early termination if the primary metric is no longer improving. |\n",
|
||||
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
|
||||
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
|
||||
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
|
||||
"| **model_explainability** | Flag to disable explaining the best automated ML model at the end of all training iterations. The default is True and will block non-explainable models which may impact the forecast accuracy. For more information, see [Interpretability: model explanations in automated machine learning](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-machine-learning-interpretability-automl). |\n",
|
||||
"\n",
|
||||
"#### ``HTSTrainParameters`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **automl_settings** | The ``AutoMLConfig`` object defined above. |\n",
|
||||
"| **hierarchy_column_names** | The names of columns that define the hierarchical structure of the data from highest level to most granular. |\n",
|
||||
"| **training_level** | The level of the hierarchy to be used for training models. |\n",
|
||||
"| **enable_engineered_explanations** | The switch controls engineered explanations. |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007061544
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.train.automl.runtime._hts.hts_parameters import HTSTrainParameters\n",
|
||||
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||
"from azureml.train.automl.automlconfig import AutoMLConfig\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"model_explainability = True\n",
|
||||
"\n",
|
||||
"engineered_explanations = False\n",
|
||||
"# Define your hierarchy. Adjust the settings below based on your dataset.\n",
|
||||
"hierarchy = [\"state\", \"store_id\", \"product_category\", \"SKU\"]\n",
|
||||
"training_level = \"SKU\"\n",
|
||||
"\n",
|
||||
"# Set your forecast parameters. Adjust the settings below based on your dataset.\n",
|
||||
"time_column_name = \"date\"\n",
|
||||
"label_column_name = \"quantity\"\n",
|
||||
"forecast_horizon = 7\n",
|
||||
"\n",
|
||||
"forecasting_parameters = ForecastingParameters(\n",
|
||||
" time_column_name=time_column_name,\n",
|
||||
" forecast_horizon=forecast_horizon,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"automl_settings = AutoMLConfig(\n",
|
||||
" task=\"forecasting\",\n",
|
||||
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||
" experiment_timeout_hours=1,\n",
|
||||
" label_column_name=label_column_name,\n",
|
||||
" track_child_runs=False,\n",
|
||||
" forecasting_parameters=forecasting_parameters,\n",
|
||||
" pipeline_fetch_max_batch_size=15,\n",
|
||||
" model_explainability=model_explainability,\n",
|
||||
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||
" cv_step_size=\"auto\",\n",
|
||||
" # The following settings are specific to this sample and should be adjusted according to your own needs.\n",
|
||||
" iteration_timeout_minutes=10,\n",
|
||||
" iterations=15,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"hts_parameters = HTSTrainParameters(\n",
|
||||
" automl_settings=automl_settings,\n",
|
||||
" hierarchy_column_names=hierarchy,\n",
|
||||
" training_level=training_level,\n",
|
||||
" enable_engineered_explanations=engineered_explanations,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up hierarchy training pipeline"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The ``process_count_per_node`` is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
|
||||
"\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **experiment** | The experiment used for training. |\n",
|
||||
"| **train_data** | The file dataset to be used as input to the training run. |\n",
|
||||
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
|
||||
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node for optimal performance. |\n",
|
||||
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
|
||||
"| **run_invocation_timeout** | Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. This must be greater than ``experiment_timeout_hours`` by at least 300 seconds. |\n",
|
||||
"\n",
|
||||
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.\n",
|
||||
"\n",
|
||||
"**Note**: Total time taken for the **training step** in the pipeline to complete = $ \\frac{t}{ p \\times n } \\times ts $\n",
|
||||
"where,\n",
|
||||
"- $ t $ is time taken for training one partition (can be viewed in the training logs)\n",
|
||||
"- $ p $ is ``process_count_per_node``\n",
|
||||
"- $ n $ is ``node_count``\n",
|
||||
"- $ ts $ is total number of partitions in time series based on ``partition_column_names``"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
|
||||
" experiment=experiment,\n",
|
||||
" train_data=registered_train,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" node_count=2,\n",
|
||||
" process_count_per_node=8,\n",
|
||||
" train_pipeline_parameters=hts_parameters,\n",
|
||||
" run_invocation_timeout=3900,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"\n",
|
||||
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Submit the pipeline to run\n",
|
||||
"Next we submit our pipeline to run. The whole training pipeline takes about 1h using a Standard_D16_V3 VM with our current ParallelRunConfig setting."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_run = experiment.submit(training_pipeline)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Check the run status, if training_run is in completed state, continue to forecasting. If training_run is in another state, check the portal for failures."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### [Optional] Get the explanations\n",
|
||||
"First we need to download the explanations to the local disk."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if model_explainability:\n",
|
||||
" expl_output = training_run.get_pipeline_output(\"explanations\")\n",
|
||||
" expl_output.download(\"training_explanations\")\n",
|
||||
"else:\n",
|
||||
" print(\n",
|
||||
" \"Model explanations are available only if model_explainability is set to True.\"\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The explanations are downloaded to the \"training_explanations/azureml\" directory."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if model_explainability:\n",
|
||||
" explanations_dirrectory = os.listdir(\n",
|
||||
" os.path.join(\"training_explanations\", \"azureml\")\n",
|
||||
" )\n",
|
||||
" if len(explanations_dirrectory) > 1:\n",
|
||||
" print(\n",
|
||||
" \"Warning! The directory contains multiple explanations, only the first one will be displayed.\"\n",
|
||||
" )\n",
|
||||
" print(\"The explanations are located at {}.\".format(explanations_dirrectory[0]))\n",
|
||||
" # Now we will list all the explanations.\n",
|
||||
" explanation_path = os.path.join(\n",
|
||||
" \"training_explanations\",\n",
|
||||
" \"azureml\",\n",
|
||||
" explanations_dirrectory[0],\n",
|
||||
" \"training_explanations\",\n",
|
||||
" )\n",
|
||||
" print(\"Available explanations\")\n",
|
||||
" print(\"==============================\")\n",
|
||||
" print(\"\\n\".join(os.listdir(explanation_path)))\n",
|
||||
"else:\n",
|
||||
" print(\n",
|
||||
" \"Model explanations are available only if model_explainability is set to True.\"\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"View the explanations on \"state\" level."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import display\n",
|
||||
"\n",
|
||||
"explanation_type = \"raw\"\n",
|
||||
"level = \"state\"\n",
|
||||
"\n",
|
||||
"if model_explainability:\n",
|
||||
" display(\n",
|
||||
" pd.read_csv(\n",
|
||||
" os.path.join(explanation_path, \"{}_explanations_{}.csv\").format(\n",
|
||||
" explanation_type, level\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 5.0 Forecasting\n",
|
||||
"For hierarchical forecasting we need to provide the HTSInferenceParameters object.\n",
|
||||
"#### ``HTSInferenceParameters`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **hierarchy_forecast_level:** | The default level of the hierarchy to produce prediction/forecast on. |\n",
|
||||
"| **allocation_method:** | \\[Optional] The disaggregation method to use if the hierarchy forecast level specified is below the define hierarchy training level. <br><i>(average historical proportions) 'average_historical_proportions'</i><br><i>(proportions of the historical averages) 'proportions_of_historical_average'</i> |\n",
|
||||
"\n",
|
||||
"#### ``get_many_models_batch_inference_steps`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **experiment** | The experiment used for inference run. |\n",
|
||||
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
|
||||
"| **compute_target** | The compute target that runs the inference pipeline. |\n",
|
||||
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
|
||||
"| **process_count_per_node** | \\[Optional] The number of processes per node. By default it's 2 (should be at most half of the number of cores in a single node of the compute cluster that will be used for the experiment).\n",
|
||||
"| **inference_pipeline_parameters** | \\[Optional] The ``HTSInferenceParameters`` object defined above. |\n",
|
||||
"| **train_run_id** | \\[Optional] The run id of the **training pipeline**. By default it is the latest successful training pipeline run in the experiment. |\n",
|
||||
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
|
||||
"| **run_invocation_timeout** | \\[Optional] Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.train.automl.runtime._hts.hts_parameters import HTSInferenceParameters\n",
|
||||
"\n",
|
||||
"inference_parameters = HTSInferenceParameters(\n",
|
||||
" hierarchy_forecast_level=\"store_id\", # The setting is specific to this dataset and should be changed based on your dataset.\n",
|
||||
" allocation_method=\"proportions_of_historical_average\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
|
||||
" experiment=experiment,\n",
|
||||
" inference_data=registered_inference,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" inference_pipeline_parameters=inference_parameters,\n",
|
||||
" node_count=2,\n",
|
||||
" process_count_per_node=8,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"\n",
|
||||
"inference_pipeline = Pipeline(ws, steps=steps)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inference_run = experiment.submit(inference_pipeline)\n",
|
||||
"inference_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retrieve results\n",
|
||||
"\n",
|
||||
"Forecast results can be retrieved through the following code. The prediction results summary and the actual predictions are downloaded in forecast_results folder"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"forecasts = inference_run.get_pipeline_output(\"forecasts\")\n",
|
||||
"forecasts.download(\"forecast_results\")"
|
||||
]
|
||||
},
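{
"cell_type": "markdown",
"metadata": {},
"source": [
"The snippet below is a minimal sketch (not part of the original sample) for loading the downloaded results into pandas. It assumes the default ParallelRunStep output file name ``parallel_run_step.txt`` (space-separated, no header); adjust the glob pattern and the column handling to match your run."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import glob\n",
"import os\n",
"\n",
"import pandas as pd\n",
"\n",
"# Assumption: results were written as space-separated text files without headers.\n",
"result_files = glob.glob(\n",
"    os.path.join(\"forecast_results\", \"**\", \"parallel_run_step.txt\"), recursive=True\n",
")\n",
"forecast_df = pd.concat(\n",
"    (pd.read_csv(f, sep=\" \", header=None) for f in result_files), ignore_index=True\n",
")\n",
"forecast_df.head()"
]
},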
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Resbumit the Pipeline\n",
|
||||
"\n",
|
||||
"The inference pipeline can be submitted with different configurations."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inference_run = experiment.submit(\n",
|
||||
" inference_pipeline, pipeline_parameters={\"hierarchy_forecast_level\": \"state\"}\n",
|
||||
")\n",
|
||||
"inference_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "jialiu"
|
||||
}
|
||||
],
|
||||
"categories": [
|
||||
"how-to-use-azureml",
|
||||
"automated-machine-learning"
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.8 - AzureML",
|
||||
"language": "python",
|
||||
"name": "python38-azureml"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
name: auto-ml-forecasting-function
|
||||
dependencies:
|
||||
- py-xgboost<=0.90
|
||||
- pip:
|
||||
- azureml-sdk
|
||||
- numpy==1.16.2
|
||||
- pandas==0.23.4
|
||||
- azureml-train-automl
|
||||
- azureml-widgets
|
||||
- matplotlib
|
||||
|
@@ -0,0 +1,122 @@
|
||||
---
|
||||
page_type: sample
|
||||
languages:
|
||||
- python
|
||||
products:
|
||||
- azure-machine-learning
|
||||
description: Tutorial showing how to solve complex machine learning time series forecasting problems at scale by using Azure Automated ML and the Many Models solution accelerator.
|
||||
---
|
||||
|
||||

|
||||
# Many Models Solution Accelerator
|
||||
|
||||
<!--
|
||||
Guidelines on README format: https://review.docs.microsoft.com/help/onboard/admin/samples/concepts/readme-template?branch=master
|
||||
|
||||
Guidance on onboarding samples to docs.microsoft.com/samples: https://review.docs.microsoft.com/help/onboard/admin/samples/process/onboarding?branch=master
|
||||
|
||||
Taxonomies for products and languages: https://review.docs.microsoft.com/new-hope/information-architecture/metadata/taxonomies?branch=master
|
||||
-->
|
||||
|
||||
In the real world, many problems can be too complex to be solved by a single machine learning model. Whether that is predicting sales for each individual store, building a predictive maintenance model for hundreds of oil wells, or tailoring an experience to individual users, building a model for each instance can lead to improved results on many machine learning problems.
|
||||
|
||||
This pattern is very common across a wide variety of industries and applicable to many real-world use cases. Below are some examples we have seen where this pattern is being used.
|
||||
|
||||
- Energy and utility companies building predictive maintenance models for thousands of oil wells, hundreds of wind turbines or hundreds of smart meters
|
||||
|
||||
- Retail organizations building workforce optimization models for thousands of stores, campaign promotion propensity models, and price optimization models for the hundreds of thousands of products they sell
|
||||
|
||||
- Restaurant chains building demand forecasting models across thousands of restaurants
|
||||
|
||||
- Banks and financial institutes building cash replenishment models for each ATM machine and for groups of ATMs, or building personalized models for individuals
|
||||
|
||||
- Enterprises building revenue forecasting models at each division level
|
||||
|
||||
- Document management companies building text analytics and legal document search models for each state
|
||||
|
||||
Azure Machine Learning (AML) makes it easy to train, operate, and manage hundreds or even thousands of models. This repo will walk you through the end-to-end process of creating a many models solution from training to scoring to monitoring.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
To use this solution accelerator, all you need is access to an [Azure subscription](https://azure.microsoft.com/free/) and an [Azure Machine Learning Workspace](https://docs.microsoft.com/azure/machine-learning/how-to-manage-workspace) that you'll create below.
|
||||
|
||||
While it's not required, a basic understanding of Azure Machine Learning will be helpful for understanding the solution. The following resources can help introduce you to AML:
|
||||
|
||||
1. [Azure Machine Learning Overview](https://azure.microsoft.com/services/machine-learning/)
|
||||
2. [Azure Machine Learning Tutorials](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup)
|
||||
3. [Azure Machine Learning Sample Notebooks on Github](https://github.com/Azure/azureml-examples)
|
||||
|
||||
## Getting started
|
||||
|
||||
### 1. Deploy Resources
|
||||
|
||||
Start by deploying the resources to Azure. The button below will deploy Azure Machine Learning and its related resources:
|
||||
|
||||
<a href="https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2Fmicrosoft%2Fsolution-accelerator-many-models%2Fmaster%2Fazuredeploy.json" target="_blank">
|
||||
<img src="http://azuredeploy.net/deploybutton.png"/>
|
||||
</a>
|
||||
|
||||
### 2. Configure Development Environment
|
||||
|
||||
Next you'll need to configure your [development environment](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment) for Azure Machine Learning. We recommend using a [Compute Instance](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment#compute-instance) as it's the fastest way to get up and running.
|
||||
|
||||
### 3. Run Notebooks
|
||||
|
||||
Once your development environment is set up, run through the Jupyter Notebooks sequentially following the steps outlined. By the end, you'll know how to train, score, and make predictions using the many models pattern on Azure Machine Learning.
|
||||
|
||||

|
||||
|
||||
|
||||
## Contents
|
||||
|
||||
In this repo, you'll train and score a forecasting model for each orange juice brand and for each store at a (simulated) grocery chain. By the end, you'll have used up to 11,973 models to forecast sales for the next few weeks.
|
||||
|
||||
The data used in this sample is simulated based on the [Dominick's Orange Juice Dataset](http://www.cs.unitn.it/~taufer/QMMA/L10-OJ-Data.html#(1)), sales data from a Chicago-area grocery store.
|
||||
|
||||
<img src="images/Flow_map.png" width="1000">
|
||||
|
||||
### Using Automated ML to train the models:
|
||||
|
||||
The [`auto-ml-forecasting-many-models.ipynb`](./auto-ml-forecasting-many-models.ipynb) notebook is a guided solution accelerator that demonstrates the steps from data preparation to model training and forecasting with the trained models, as well as operationalizing the solution.
|
||||
|
||||
## How-to-videos
|
||||
|
||||
Watch these how-to videos for a step-by-step walk-through of the many models solution accelerator to learn how to set up your models using Automated ML.
|
||||
|
||||
### Automated ML
|
||||
|
||||
[](https://channel9.msdn.com/Shows/Docs-AI/Building-Large-Scale-Machine-Learning-Forecasting-Models-using-Azure-Machine-Learnings-Automated-ML)
|
||||
|
||||
## Key concepts
|
||||
|
||||
### ParallelRunStep
|
||||
|
||||
[ParallelRunStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.parallel_run_step.parallelrunstep?view=azure-ml-py) enables the parallel training of models and is commonly used for batch inferencing. This [document](https://docs.microsoft.com/azure/machine-learning/how-to-use-parallel-run-step) walks through some of the key concepts around ParallelRunStep.
|
||||
|
||||
### Pipelines
|
||||
|
||||
[Pipelines](https://docs.microsoft.com/azure/machine-learning/concept-ml-pipelines) allow you to create workflows in your machine learning projects. These workflows have a number of benefits including speed, simplicity, repeatability, and modularity.
|
||||
|
||||
### Automated Machine Learning
|
||||
|
||||
[Automated Machine Learning](https://docs.microsoft.com/azure/machine-learning/concept-automated-ml), also referred to as automated ML or AutoML, is the process of automating the time-consuming, iterative tasks of machine learning model development. It allows data scientists, analysts, and developers to build ML models with high scale, efficiency, and productivity, all while sustaining model quality.
|
||||
|
||||
### Other Concepts
|
||||
|
||||
In addition to ParallelRunStep, Pipelines, and Automated Machine Learning, you'll also be working with the following concepts: [workspace](https://docs.microsoft.com/azure/machine-learning/concept-workspace), [datasets](https://docs.microsoft.com/azure/machine-learning/concept-data#datasets), [compute targets](https://docs.microsoft.com/azure/machine-learning/concept-compute-target#train), [python script steps](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), and [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/).
|
||||
|
||||
## Contributing
|
||||
|
||||
This project welcomes contributions and suggestions. To learn more, visit the [contributing](../../../CONTRIBUTING.md) section.
|
||||
|
||||
Most contributions require you to agree to a Contributor License Agreement (CLA)
|
||||
declaring that you have the right to, and actually do, grant us
|
||||
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
|
||||
|
||||
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
|
||||
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
|
||||
provided by the bot. You will only need to do this once across all repos using our CLA.
|
||||
|
||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
||||
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
|
||||
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
|
||||
@@ -0,0 +1,898 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<font color=\"red\" size=\"5\"><strong>!Important!</strong> </br>This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/pipelines/1k_demand_forecasting_with_pipeline_components/automl-forecasting-demand-many-models-in-pipeline)).</font>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Many Models - Automated ML\n",
|
||||
"**_Generate many models time series forecasts with Automated Machine Learning_**\n",
|
||||
"\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For this notebook we are using a synthetic dataset portraying sales data to predict the the quantity of a vartiety of product skus across several states, stores, and product categories.\n",
|
||||
"\n",
|
||||
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prerequisites\n",
|
||||
"You'll need to create a compute Instance by following [these](https://learn.microsoft.com/en-us/azure/machine-learning/v1/how-to-create-manage-compute-instance?tabs=python) instructions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1.0 Set up workspace, datastore, experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613003526897
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace, Datastore\n",
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"# Set up your workspace\n",
|
||||
"ws = Workspace.from_config()\n",
|
||||
"ws.get_details()\n",
|
||||
"\n",
|
||||
"# Set up your datastores\n",
|
||||
"dstore = ws.get_default_datastore()\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output[\"SDK version\"] = azureml.core.VERSION\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"Default datastore name\"] = dstore.name\n",
|
||||
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Choose an experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613003540729
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Experiment\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, \"automl-many-models\")\n",
|
||||
"\n",
|
||||
"print(\"Experiment name: \" + experiment.name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2.0 Data\n",
|
||||
"\n",
|
||||
"This notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. \n",
|
||||
"\n",
|
||||
"The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each thus allowing 11,973 models to be trained to showcase the power of the many models pattern.\n",
|
||||
"\n",
|
||||
" \n",
|
||||
"In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:\n",
|
||||
"\n",
|
||||
"1. Registering the blob container as a Datastore to the Workspace\n",
|
||||
"2. Registering a tabular dataset to the Workspace"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"### 2.1 Data Preparation\n",
|
||||
"The OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on time column ('WeekStarting') before and after '1992-5-28' .\n",
|
||||
"\n",
|
||||
"The container has\n",
|
||||
"<ol>\n",
|
||||
" <li><b>'oj-data-tabular'</b> and <b>'oj-inference-tabular'</b> folders that contains training and inference data respectively for the 11,973 models. </li>\n",
|
||||
" <li>It also has <b>'oj-data-small-tabular'</b> and <b>'oj-inference-small-tabular'</b> folders that has training and inference data for 10 models.</li>\n",
|
||||
"</ol>\n",
|
||||
"\n",
|
||||
"To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"<b> To use your own data, put your own data in a blobstore folder. As shown it can be one file or multiple files. We can then register datastore using that blob as shown below.\n",
|
||||
" \n",
|
||||
"<h3> How sample data in blob store looks like</h3>\n",
|
||||
"\n",
|
||||
"['oj-data-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)</b>\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"['oj-inference-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"['oj-data-small-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"['oj-inference-small-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 2.2 Register the blob container as DataStore\n",
|
||||
"\n",
|
||||
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
|
||||
"\n",
|
||||
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.\n",
|
||||
"\n",
|
||||
"In this next step, we will be registering blob storage as datastore to the Workspace."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Datastore\n",
|
||||
"\n",
|
||||
"# Please change the following to point to your own blob container and pass in account_key\n",
|
||||
"blob_datastore_name = \"automl_many_models\"\n",
|
||||
"container_name = \"automl-sample-notebook-data\"\n",
|
||||
"account_name = \"automlsamplenotebookdata\"\n",
|
||||
"\n",
|
||||
"oj_datastore = Datastore.register_azure_blob_container(\n",
|
||||
" workspace=ws,\n",
|
||||
" datastore_name=blob_datastore_name,\n",
|
||||
" container_name=container_name,\n",
|
||||
" account_name=account_name,\n",
|
||||
" create_if_not_exists=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 2.3 Using tabular datasets \n",
|
||||
"\n",
|
||||
"Now that the datastore is available from the Workspace, [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset, so that users who have their data which can be in one or many files (*.parquet or *.csv) and have not split up data according to group columns needed for training, can do so using out of box support for 'partiion_by' feature of TabularDataset shown in section 5.0 below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007017296
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Dataset\n",
|
||||
"\n",
|
||||
"ds_name_small = \"oj-data-small-tabular\"\n",
|
||||
"input_ds_small = Dataset.Tabular.from_delimited_files(\n",
|
||||
" path=oj_datastore.path(ds_name_small + \"/\"), validate=False\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"inference_name_small = \"oj-inference-small-tabular\"\n",
|
||||
"inference_ds_small = Dataset.Tabular.from_delimited_files(\n",
|
||||
" path=oj_datastore.path(inference_name_small + \"/\"), validate=False\n",
|
||||
")"
|
||||
]
|
||||
},
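{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a hedged illustration of the ``partition_by`` feature mentioned above (the target path and dataset name are hypothetical, and this step is not required for the pre-partitioned sample data):\n",
"\n",
"```python\n",
"# Write a copy of the dataset partitioned by the grouping columns back to the datastore.\n",
"partitioned_ds = input_ds_small.partition_by(\n",
"    partition_keys=[\"Store\", \"Brand\"],\n",
"    target=(dstore, \"oj_data_small_partitioned/\"),\n",
"    name=\"oj_data_small_partitioned\",\n",
")\n",
"```"
]
},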
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 2.4 Configure data with ``OutputFileDatasetConfig`` objects\n",
|
||||
"This step shows how to configure output data from a pipeline step. One of the use cases for this step is when you want to do some preprocessing before feeding the data to training step. Intermediate data (or output of a step) is represented by an ``OutputFileDatasetConfig`` object. ``output_data`` is produced as the output of a step. Optionally, this data can be registered as a dataset by calling the ``register_on_complete`` method. If you create an ``OutputFileDatasetConfig`` in one step and use it as an input to another step, that data dependency between steps creates an implicit execution order in the pipeline.\n",
|
||||
"\n",
|
||||
"``OutputFileDatasetConfig`` objects return a directory, and by default write output to the default datastore of the workspace.\n",
|
||||
"\n",
|
||||
"Since instance creation for class ``OutputTabularDatasetConfig`` is not allowed, we first create an instance of this class. Then we use the ``read_parquet_files`` method to read the parquet file into ``OutputTabularDatasetConfig``."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.data.output_dataset_config import OutputFileDatasetConfig\n",
|
||||
"\n",
|
||||
"output_data = OutputFileDatasetConfig(\n",
|
||||
" name=\"processed_data\", destination=(dstore, \"outputdataset/{run-id}/{output-name}\")\n",
|
||||
").as_upload()\n",
|
||||
"# output_data_dataset = output_data.register_on_complete(\n",
|
||||
"# name='processed_data', description = 'files from prev step')\n",
|
||||
"output_data = output_data.read_parquet_files()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3.0 Build the training pipeline\n",
|
||||
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Choose a compute target\n",
|
||||
"\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
|
||||
"\n",
|
||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007037308
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"\n",
|
||||
"# Name your cluster\n",
|
||||
"compute_name = \"mm-compute-v1\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"if compute_name in ws.compute_targets:\n",
|
||||
" compute_target = ws.compute_targets[compute_name]\n",
|
||||
" if compute_target and type(compute_target) is AmlCompute:\n",
|
||||
" print(\"Found compute target: \" + compute_name)\n",
|
||||
"else:\n",
|
||||
" print(\"Creating a new compute target...\")\n",
|
||||
" provisioning_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_D14_V2\", max_nodes=20\n",
|
||||
" )\n",
|
||||
" # Create the compute target\n",
|
||||
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
|
||||
"\n",
|
||||
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
|
||||
" # If no min node count is provided it will use the scale settings for the cluster\n",
|
||||
" compute_target.wait_for_completion(\n",
|
||||
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # For a more detailed view of current cluster status, use the 'status' property\n",
|
||||
" print(compute_target.status.serialize())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configure the training run's environment\n",
|
||||
"The next step is making sure that the remote training run has all the dependencies needed by the training steps. Dependencies and the runtime context are set by creating and configuring a RunConfiguration object.\n",
|
||||
"\n",
|
||||
"The code below shows two options for handling dependencies. As presented, with ``USE_CURATED_ENV = True``, the configuration is based on a [curated environment](https://docs.microsoft.com/en-us/azure/machine-learning/resource-curated-environments). Curated environments have prebuilt Docker images in the [Microsoft Container Registry](https://hub.docker.com/publishers/microsoftowner). For more information, see [Azure Machine Learning curated environments](https://docs.microsoft.com/en-us/azure/machine-learning/resource-curated-environments).\n",
|
||||
"\n",
|
||||
"The path taken if you change ``USE_CURATED_ENV`` to False shows the pattern for explicitly setting your dependencies. In that scenario, a new custom Docker image will be created and registered in an Azure Container Registry within your resource group (see [Introduction to private Docker container registries in Azure](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-intro)). Building and registering this image can take quite a few minutes."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.runconfig import RunConfiguration\n",
|
||||
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||
"from azureml.core import Environment\n",
|
||||
"\n",
|
||||
"aml_run_config = RunConfiguration()\n",
|
||||
"aml_run_config.target = compute_target\n",
|
||||
"\n",
|
||||
"USE_CURATED_ENV = True\n",
|
||||
"if USE_CURATED_ENV:\n",
|
||||
" curated_environment = Environment.get(\n",
|
||||
" workspace=ws, name=\"AzureML-sklearn-1.5\"\n",
|
||||
" )\n",
|
||||
" aml_run_config.environment = curated_environment\n",
|
||||
"else:\n",
|
||||
" aml_run_config.environment.python.user_managed_dependencies = False\n",
|
||||
"\n",
|
||||
" # Add some packages relied on by data prep step\n",
|
||||
" aml_run_config.environment.python.conda_dependencies = CondaDependencies.create(\n",
|
||||
" conda_packages=[\"pandas\", \"scikit-learn\"],\n",
|
||||
" pip_packages=[\"azureml-sdk\", \"azureml-dataset-runtime[fuse,pandas]\"],\n",
|
||||
" pin_sdk_version=False,\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up training parameters\n",
|
||||
"\n",
|
||||
"We need to provide ``ForecastingParameters``, ``AutoMLConfig`` and ``ManyModelsTrainParameters`` objects. For the forecasting task we also need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name(s) definition.\n",
|
||||
"\n",
|
||||
"#### ``ForecastingParameters`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
||||
"| **time_column_name** | The name of your time column. |\n",
|
||||
"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
|
||||
"| **cv_step_size** | Number of periods between two consecutive cross-validation folds. The default value is \\\"auto\\\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value. |\n",
|
||||
"\n",
|
||||
"#### ``AutoMLConfig`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **task** | forecasting |\n",
|
||||
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
|
||||
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
|
||||
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **experiment_timeout_hours** | Maximum amount of time in hours that each experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. **It does not control the overall timeout for the pipeline run, instead controls the timeout for each training run per partitioned time series.** |\n",
|
||||
"| **label_column_name** | The name of the label column. |\n",
|
||||
"| **n_cross_validations** | Number of cross validation splits. The default value is \\\"auto\\\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
||||
"| **enable_early_stopping** | Flag to enable early termination if the primary metric is no longer improving. |\n",
|
||||
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
|
||||
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
|
||||
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"#### ``ManyModelsTrainParameters`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **automl_settings** | The ``AutoMLConfig`` object defined above. |\n",
|
||||
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007061544
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
|
||||
" ManyModelsTrainParameters,\n",
|
||||
")\n",
|
||||
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||
"from azureml.train.automl.automlconfig import AutoMLConfig\n",
|
||||
"\n",
|
||||
"partition_column_names = [\"Store\", \"Brand\"]\n",
|
||||
"\n",
|
||||
"forecasting_parameters = ForecastingParameters(\n",
|
||||
" time_column_name=\"WeekStarting\",\n",
|
||||
" forecast_horizon=6,\n",
|
||||
" time_series_id_column_names=partition_column_names,\n",
|
||||
" cv_step_size=\"auto\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"automl_settings = AutoMLConfig(\n",
|
||||
" task=\"forecasting\",\n",
|
||||
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||
" iteration_timeout_minutes=10,\n",
|
||||
" iterations=15,\n",
|
||||
" experiment_timeout_hours=0.25,\n",
|
||||
" label_column_name=\"Quantity\",\n",
|
||||
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
|
||||
" track_child_runs=False,\n",
|
||||
" forecasting_parameters=forecasting_parameters,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"mm_paramters = ManyModelsTrainParameters(\n",
|
||||
" automl_settings=automl_settings, partition_column_names=partition_column_names\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Construct your pipeline steps\n",
|
||||
"Once you have the compute resource and environment created, you're ready to define your pipeline's steps. There are many built-in steps available via the Azure Machine Learning SDK, as you can see on the [reference documentation for the azureml.pipeline.steps package](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps?view=azure-ml-py). The most flexible class is [PythonScriptStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), which runs a Python script.\n",
|
||||
"\n",
|
||||
"Your data preparation code is in a subdirectory (in this example, \"data_preprocessing_tabular.py\" in the directory \"./scripts\"). As part of the pipeline creation process, this directory is zipped and uploaded to the compute_target and the step runs the script specified as the value for ``script_name``.\n",
|
||||
"\n",
|
||||
"The ``arguments`` values specify the inputs and outputs of the step. In the example below, the baseline data is the ``input_ds_small`` dataset. The script data_preprocessing_tabular.py does whatever data-transformation tasks are appropriate to the task at hand and outputs the data to ``output_data``, of type ``OutputFileDatasetConfig``. For more information, see [Moving data into and between ML pipeline steps (Python)](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-move-data-in-out-of-pipelines). The step will run on the machine defined by ``compute_target``, using the configuration ``aml_run_config``.\n",
|
||||
"\n",
|
||||
"Reuse of previous results (``allow_reuse``) is key when using pipelines in a collaborative environment since eliminating unnecessary reruns offers agility. Reuse is the default behavior when the ``script_name``, ``inputs``, and the parameters of a step remain the same. When reuse is allowed, results from the previous run are immediately sent to the next step. If ``allow_reuse`` is set to False, a new run will always be generated for this step during pipeline execution.\n",
|
||||
"\n",
|
||||
"> Note that we only support partitioned FileDataset and TabularDataset without partition when using such output as input.\n",
|
||||
"\n",
|
||||
"> Note that we **drop column** \"Revenue\" from the dataset in this step to avoid information leak as \"Quantity\" = \"Revenue\" / \"Price\". **Please modify the logic based on your data**."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.steps import PythonScriptStep\n",
|
||||
"\n",
|
||||
"dataprep_source_dir = \"./scripts\"\n",
|
||||
"entry_point = \"data_preprocessing_tabular.py\"\n",
|
||||
"ds_input = input_ds_small.as_named_input(\"train_10_models\")\n",
|
||||
"\n",
|
||||
"data_prep_step = PythonScriptStep(\n",
|
||||
" script_name=entry_point,\n",
|
||||
" source_directory=dataprep_source_dir,\n",
|
||||
" arguments=[\"--input\", ds_input, \"--output\", output_data],\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" runconfig=aml_run_config,\n",
|
||||
" allow_reuse=False,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"input_ds_small = output_data"
|
||||
]
|
||||
},
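{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, a minimal sketch of what ``data_preprocessing_tabular.py`` might look like (illustrative only; the actual script ships with the sample and may differ):\n",
"\n",
"```python\n",
"import argparse\n",
"import os\n",
"\n",
"from azureml.core import Run\n",
"\n",
"parser = argparse.ArgumentParser()\n",
"parser.add_argument(\"--input\", type=str)\n",
"parser.add_argument(\"--output\", type=str)\n",
"args = parser.parse_args()\n",
"\n",
"# The dataset passed via as_named_input() is available on the run context.\n",
"run = Run.get_context()\n",
"df = run.input_datasets[\"train_10_models\"].to_pandas_dataframe()\n",
"\n",
"# Drop 'Revenue' to avoid target leakage (Quantity = Revenue / Price).\n",
"df = df.drop(columns=[\"Revenue\"])\n",
"\n",
"os.makedirs(args.output, exist_ok=True)\n",
"df.to_parquet(os.path.join(args.output, \"processed.parquet\"), index=False)\n",
"```"
]
},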
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up many models pipeline"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The ``process_count_per_node`` is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
|
||||
"\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **experiment** | The experiment used for training. |\n",
|
||||
"| **train_data** | The file dataset to be used as input to the training run. |\n",
|
||||
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
|
||||
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node for optimal performance. |\n",
|
||||
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
|
||||
"| **run_invocation_timeout** | Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. This must be greater than ``experiment_timeout_hours`` by at least 300 seconds. |\n",
|
||||
"\n",
|
||||
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.\n",
|
||||
"\n",
|
||||
"**Note**: Total time taken for the **training step** in the pipeline to complete = $ \\frac{t}{ p \\times n } \\times ts $\n",
|
||||
"where,\n",
|
||||
"- $ t $ is time taken for training one partition (can be viewed in the training logs)\n",
|
||||
"- $ p $ is ``process_count_per_node``\n",
|
||||
"- $ n $ is ``node_count``\n",
|
||||
"- $ ts $ is total number of partitions in time series based on ``partition_column_names``"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
|
||||
" experiment=experiment,\n",
|
||||
" train_data=input_ds_small,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" node_count=2,\n",
|
||||
" process_count_per_node=8,\n",
|
||||
" run_invocation_timeout=1200,\n",
|
||||
" train_pipeline_parameters=mm_paramters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"\n",
|
||||
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Submit the pipeline to run\n",
|
||||
"Next we submit our pipeline to run. The whole training pipeline takes about 40m using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_run = experiment.submit(training_pipeline)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Check the run status, if training_run is in completed state, continue to forecasting. If training_run is in another state, check the portal for failures."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 5.0 Publish and schedule the train pipeline (Optional)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 5.1 Publish the pipeline\n",
|
||||
"\n",
|
||||
"Once you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',\n",
|
||||
"# description = 'train many models',\n",
|
||||
"# version = '1',\n",
|
||||
"# continue_on_step_failure = False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 5.2 Schedule the pipeline\n",
|
||||
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
|
||||
"\n",
|
||||
"# training_pipeline_id = published_pipeline.id\n",
|
||||
"\n",
|
||||
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
|
||||
"# recurring_schedule = Schedule.create(ws, name=\"automl_training_recurring_schedule\",\n",
|
||||
"# description=\"Schedule Training Pipeline to run on the first day of every month\",\n",
|
||||
"# pipeline_id=training_pipeline_id,\n",
|
||||
"# experiment_name=experiment.name,\n",
|
||||
"# recurrence=recurrence)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 6.0 Forecasting"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up output dataset for inference data\n",
|
||||
"Output of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.data import OutputFileDatasetConfig\n",
|
||||
"\n",
|
||||
"output_inference_data_ds = OutputFileDatasetConfig(\n",
|
||||
" name=\"many_models_inference_output\", destination=(dstore, \"oj/inference_data/\")\n",
|
||||
").register_on_complete(name=\"oj_inference_data_ds\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For many models we need to provide the ManyModelsInferenceParameters object.\n",
|
||||
"\n",
|
||||
"#### ``ManyModelsInferenceParameters`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **partition_column_names** | List of column names that identifies groups. |\n",
|
||||
"| **target_column_name** | \\[Optional] Column name only if the inference dataset has the target. |\n",
|
||||
"| **time_column_name** | \\[Optional] Time column name only if it is timeseries. |\n",
|
||||
"| **inference_type** | \\[Optional] Which inference method to use on the model. Possible values are 'forecast', 'predict_proba', and 'predict'. |\n",
|
||||
"| **forecast_mode** | \\[Optional] The type of forecast to be used, either 'rolling' or 'recursive'; defaults to 'recursive'. |\n",
|
||||
"| **step** | \\[Optional] Number of periods to advance the forecasting window in each iteration **(for rolling forecast only)**; defaults to 1. |\n",
|
||||
"\n",
|
||||
"#### ``get_many_models_batch_inference_steps`` arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **experiment** | The experiment used for inference run. |\n",
|
||||
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
|
||||
"| **compute_target** | The compute target that runs the inference pipeline. |\n",
|
||||
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
|
||||
"| **process_count_per_node** | \\[Optional] The number of processes per node. By default it's 2 (should be at most half of the number of cores in a single node of the compute cluster that will be used for the experiment).\n",
|
||||
"| **inference_pipeline_parameters** | \\[Optional] The ``ManyModelsInferenceParameters`` object defined above. |\n",
|
||||
"| **append_row_file_name** | \\[Optional] The name of the output file (optional, default value is 'parallel_run_step.txt'). Supports 'txt' and 'csv' file extension. A 'txt' file extension generates the output in 'txt' format with space as separator without column names. A 'csv' file extension generates the output in 'csv' format with comma as separator and with column names. |\n",
|
||||
"| **train_run_id** | \\[Optional] The run id of the **training pipeline**. By default it is the latest successful training pipeline run in the experiment. |\n",
|
||||
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
|
||||
"| **run_invocation_timeout** | \\[Optional] Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **output_datastore** | \\[Optional] The ``Datastore`` or ``OutputDatasetConfig`` to be used for output. If specified any pipeline output will be written to that location. If unspecified the default datastore will be used. |\n",
|
||||
"| **arguments** | \\[Optional] Arguments to be passed to inference script. Possible argument is '--forecast_quantiles' followed by quantile values. |"
|
||||
]
|
||||
},
|
||||
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
"    ManyModelsInferenceParameters,\n",
")\n",
"\n",
"mm_parameters = ManyModelsInferenceParameters(\n",
"    partition_column_names=[\"Store\", \"Brand\"],\n",
"    time_column_name=\"WeekStarting\",\n",
"    target_column_name=\"Quantity\",\n",
")\n",
"\n",
"output_file_name = \"parallel_run_step.csv\"\n",
"\n",
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
"    experiment=experiment,\n",
"    inference_data=inference_ds_small,\n",
"    node_count=2,\n",
"    process_count_per_node=8,\n",
"    compute_target=compute_target,\n",
"    run_invocation_timeout=300,\n",
"    output_datastore=output_inference_data_ds,\n",
"    train_run_id=training_run.id,\n",
"    train_experiment_name=training_run.experiment.name,\n",
"    inference_pipeline_parameters=mm_parameters,\n",
"    append_row_file_name=output_file_name,\n",
"    arguments=[\"--forecast_quantiles\", 0.1, 0.9],\n",
")"
]
},
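{
"cell_type": "markdown",
"metadata": {},
"source": [
"The parameters above use the default recursive forecast. As a sketch of the rolling alternative described in the ``ManyModelsInferenceParameters`` table, the commented cell below sets ``forecast_mode`` and ``step``; a rolling forecast advances the window over known actuals, which is why the inference data would need to contain the target column."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A sketch of rolling-forecast parameters (same schema assumptions as above).\n",
"# mm_parameters_rolling = ManyModelsInferenceParameters(\n",
"#     partition_column_names=[\"Store\", \"Brand\"],\n",
"#     time_column_name=\"WeekStarting\",\n",
"#     target_column_name=\"Quantity\",\n",
"#     forecast_mode=\"rolling\",\n",
"#     step=1,  # advance the forecast window one period per iteration\n",
"# )"
]
},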
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"inference_pipeline = Pipeline(ws, steps=inference_steps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline)\n",
"inference_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieve results\n",
"\n",
"The forecasting pipeline forecasts the orange juice quantity for each Store and Brand combination. The pipeline returns one file with the predictions for each store and writes the result to the forecasting_output blob container. The details of the blob container are listed in 'forecasting_output.txt' under Outputs+logs.\n",
"\n",
"The following code snippet:\n",
"1. Downloads the contents of the output folder that was passed to the parallel run step,\n",
"2. Reads the output file containing the predictions into a pandas DataFrame, and\n",
"3. Displays the first 10 rows of the predictions."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
"\n",
"forecasting_results_name = \"forecasting_results\"\n",
"forecasting_output_name = \"many_models_inference_output\"\n",
"forecast_file = get_output_from_mm_pipeline(\n",
"    inference_run, forecasting_results_name, forecasting_output_name, output_file_name\n",
")\n",
"df = pd.read_csv(forecast_file)\n",
"print(\n",
"    \"Prediction has \", df.shape[0], \" rows. The first 10 rows are displayed below.\"\n",
")\n",
"df.head(10)"
]
},
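{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check (a sketch, assuming the 'csv' output preserves the input's Store and Brand columns), you can count forecast rows per partition to confirm that every series produced output:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Count forecast rows per Store/Brand partition; each trained series should appear.\n",
"df.groupby([\"Store\", \"Brand\"]).size().head(10)"
]
},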
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 7.0 Publish and schedule the inference pipeline (Optional)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.1 Publish the pipeline\n",
"\n",
"Once you have a pipeline you're happy with, you can publish it so that you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for more information on publishing and calling pipelines."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',\n",
"#                                                     description = 'forecast many models',\n",
"#                                                     version = '1',\n",
"#                                                     continue_on_step_failure = False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.2 Schedule the pipeline\n",
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically produce fresh forecasts every month, or in response to another trigger such as data drift."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
"\n",
"# forecasting_pipeline_id = published_pipeline_inf.id\n",
"\n",
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
"# recurring_schedule = Schedule.create(ws, name=\"automl_forecasting_recurring_schedule\",\n",
"#                                      description=\"Schedule Forecasting Pipeline to run on the first day of every month\",\n",
"#                                      pipeline_id=forecasting_pipeline_id,\n",
"#                                      experiment_name=experiment.name,\n",
"#                                      recurrence=recurrence)"
]
}
],
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "jialiu"
|
||||
}
|
||||
],
|
||||
"categories": [
|
||||
"how-to-use-azureml",
|
||||
"automated-machine-learning"
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.8 - AzureML",
|
||||
"language": "python",
|
||||
"name": "python38-azureml"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.10"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||